From: Dongwon Kim <dongwon.kim@intel.com>
To: linux-kernel@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org, xen-devel@lists.xenproject.org,
	mateuszx.potrola@intel.com, dongwon.kim@intel.com
Subject: [RFC PATCH 09/60] hyper_dmabuf: indirect DMA_BUF synchronization via shadowing
Date: Tue, 19 Dec 2017 11:29:25 -0800
Message-Id: <1513711816-2618-9-git-send-email-dongwon.kim@intel.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1513711816-2618-1-git-send-email-dongwon.kim@intel.com>
References: <1513711816-2618-1-git-send-email-dongwon.kim@intel.com>

The importer now sends a synchronization request to the exporter
whenever any DMA_BUF operation (e.g. dma_buf_map and dma_buf_unmap) is
executed on an imported Hyper_DMABUF. Each such request results in the
creation of a shadow DMA_BUF on the exporter side, and the exact same
DMA_BUF operation is then executed on it. The main purpose of this is
to eventually keep the DMA_BUF synchronized between its original
creator and the end consumer running in the importer VM.

Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
 drivers/xen/hyper_dmabuf/Makefile                  |   1 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        |  90 ++++++----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      |  52 ++++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c       |   8 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |  43 +++--
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    | 189 +++++++++++++++++++++
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h    |   6 +
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h     |  32 +++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c   |  52 +++++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |   2 +-
 10 files changed, 397 insertions(+), 78 deletions(-)
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile b/drivers/xen/hyper_dmabuf/Makefile
index 0be7445..3459382 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -7,6 +7,7 @@ ifneq ($(KERNELRELEASE),)
 	hyper_dmabuf_list.o \
 	hyper_dmabuf_imp.o \
 	hyper_dmabuf_msg.o \
+	hyper_dmabuf_remote_sync.o \
 	xen/hyper_dmabuf_xen_comm.o \
 	xen/hyper_dmabuf_xen_comm_list.o

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
index 6b16e37..2c78bc1 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
@@ -169,7 +169,8 @@ grant_ref_t hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
 	/*
 	 * Calculate number of pages needed for 2nd level addresing:
 	 */
-	int n_2nd_level_pages = (nents/REFS_PER_PAGE + ((nents % REFS_PER_PAGE) ? 1: 0));/* rounding */
+	int n_2nd_level_pages = (nents/REFS_PER_PAGE +
+				((nents % REFS_PER_PAGE) ? 1: 0));
 	int i;
 	unsigned long gref_page_start;
 	grant_ref_t *tmp_page;
@@ -187,7 +188,9 @@ grant_ref_t hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
 	/* Share 2nd level addressing pages in readonly mode*/
 	for (i=0; i< n_2nd_level_pages; i++) {
-		addr_refs[i] = gnttab_grant_foreign_access(rdomain, virt_to_mfn((unsigned long)tmp_page+i*PAGE_SIZE ), 1);
+		addr_refs[i] = gnttab_grant_foreign_access(rdomain,
+					virt_to_mfn((unsigned long)tmp_page+i*PAGE_SIZE ),
+					1);
 	}

 	/*
@@ -213,7 +216,9 @@ grant_ref_t hyper_dmabuf_create_addressing_tables(grant_ref_t *data_refs, int ne
 	}

 	/* Share top level addressing page in readonly mode*/
-	top_level_ref = gnttab_grant_foreign_access(rdomain, virt_to_mfn((unsigned long)tmp_page), 1);
+	top_level_ref = gnttab_grant_foreign_access(rdomain,
+					virt_to_mfn((unsigned long)tmp_page),
+					1);

 	kfree(addr_refs);

@@ -255,7 +260,9 @@ struct page** hyper_dmabuf_get_data_refs(grant_ref_t top_level_ref, int domid, i
 	}

 	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(top_level_page));
-	gnttab_set_map_op(&top_level_map_ops, addr, GNTMAP_host_map | GNTMAP_readonly, top_level_ref, domid);
+	gnttab_set_map_op(&top_level_map_ops, addr, GNTMAP_host_map | GNTMAP_readonly,
+			top_level_ref, domid);
+
 	gnttab_set_unmap_op(&top_level_unmap_ops, addr, GNTMAP_host_map | GNTMAP_readonly, -1);

 	if (gnttab_map_refs(&top_level_map_ops, NULL, &top_level_page, 1)) {
@@ -282,7 +289,8 @@ struct page** hyper_dmabuf_get_data_refs(grant_ref_t top_level_ref, int domid, i
 	for (i = 0; i < n_level2_refs; i++) {
 		addr = (unsigned long)pfn_to_kaddr(page_to_pfn(level2_pages[i]));
-		gnttab_set_map_op(&map_ops[i], addr, GNTMAP_host_map | GNTMAP_readonly, top_level_refs[i], domid);
+		gnttab_set_map_op(&map_ops[i], addr, GNTMAP_host_map | GNTMAP_readonly,
+				top_level_refs[i], domid);
 		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map | GNTMAP_readonly, -1);
 	}
@@ -295,7 +303,7 @@ struct page** hyper_dmabuf_get_data_refs(grant_ref_t top_level_ref, int domid, i
 	for (i = 0; i < n_level2_refs; i++) {
 		if (map_ops[i].status) {
 			printk("\nxen: dom0: HYPERVISOR map grant ref failed status = %d",
-				map_ops[i].status);
+					map_ops[i].status);
 			return NULL;
 		} else {
 			unmap_ops[i].handle = map_ops[i].handle;
@@ -331,7 +339,9 @@ grant_ref_t hyper_dmabuf_create_gref_table(struct page **pages, int rdomain, int
 	/* share data pages in rw mode*/
 	for (i=0; i<nents; i++) {
[…]
@@ -…,… +…,… @@ int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
 	struct hyper_dmabuf_shared_pages_info *shared_pages_info = &sgt_info->shared_pages_info;
 	grant_ref_t *ref = shared_pages_info->top_level_page;
-	int n_2nd_level_pages = (sgt_info->sgt->nents/REFS_PER_PAGE + ((sgt_info->sgt->nents % REFS_PER_PAGE) ? 1: 0));/* rounding */
+	int n_2nd_level_pages = (sgt_info->active_sgts->sgt->nents/REFS_PER_PAGE +
+			((sgt_info->active_sgts->sgt->nents % REFS_PER_PAGE) ? 1: 0));
 	if (shared_pages_info->data_refs == NULL ||
@@ -384,7 +395,7 @@ int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
 	free_pages((unsigned long)shared_pages_info->top_level_page, 1);

 	/* End foreign access for data pages, but do not free them */
-	for (i = 0; i < sgt_info->sgt->nents; i++) {
+	for (i = 0; i < sgt_info->active_sgts->sgt->nents; i++) {
 		if (gnttab_query_foreign_access(shared_pages_info->data_refs[i])) {
 			printk("refid not shared !!\n");
 		}
@@ -404,12 +415,14 @@ int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
 int hyper_dmabuf_cleanup_imported_pages(struct hyper_dmabuf_imported_sgt_info *sgt_info) {
 	struct hyper_dmabuf_shared_pages_info *shared_pages_info = &sgt_info->shared_pages_info;

-	if(shared_pages_info->unmap_ops == NULL || shared_pages_info->data_pages == NULL) {
+	if(shared_pages_info->unmap_ops == NULL ||
+	   shared_pages_info->data_pages == NULL) {
 		printk("Imported pages already cleaned up or buffer was not imported yet\n");
 		return 0;
 	}

-	if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL, shared_pages_info->data_pages, sgt_info->nents) ) {
+	if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL,
+			      shared_pages_info->data_pages, sgt_info->nents) ) {
 		printk("Cannot unmap data pages\n");
 		return -EINVAL;
 	}
@@ -424,7 +437,8 @@ int hyper_dmabuf_cleanup_imported_pages(struct hyper_dmabuf_imported_sgt_info *s
 }

 /* map and construct sg_lists from reference numbers */
-struct sg_table* hyper_dmabuf_map_pages(grant_ref_t top_level_gref, int frst_ofst, int last_len, int nents, int sdomain,
+struct sg_table* hyper_dmabuf_map_pages(grant_ref_t top_level_gref, int frst_ofst,
+					int last_len, int nents, int sdomain,
 					struct hyper_dmabuf_shared_pages_info *shared_pages_info)
 {
 	struct sg_table *st;
@@ -451,13 +465,16 @@ struct sg_table* hyper_dmabuf_map_pages(grant_ref_t top_level_gref, int frst_ofs
 		return NULL;
 	}

-	ops = (struct gnttab_map_grant_ref *)kcalloc(nents, sizeof(struct gnttab_map_grant_ref), GFP_KERNEL);
-	unmap_ops = (struct gnttab_unmap_grant_ref *)kcalloc(nents, sizeof(struct gnttab_unmap_grant_ref), GFP_KERNEL);
+	ops = kcalloc(nents, sizeof(struct gnttab_map_grant_ref),
+		      GFP_KERNEL);
+	unmap_ops = kcalloc(nents, sizeof(struct gnttab_unmap_grant_ref),
+			    GFP_KERNEL);

 	for (i=0; i<nents; i++) {
[…]
-	if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL, refid_pages, n_level2_refs) ) {
+	if (gnttab_unmap_refs(shared_pages_info->unmap_ops, NULL, refid_pages,
+				n_level2_refs) ) {
 		printk("Cannot unmap 2nd level refs\n");
 		return NULL;
 	}
@@ -507,10 +525,8 @@ inline int hyper_dmabuf_sync_request_and_wait(int id, int ops)

 	hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);

-	/* send request */
-	ret = hyper_dmabuf_send_request(id, req);
-
-	/* TODO: wait until it gets response.. or can we just move on? */
+	/* send request and wait for a response */
+	ret = hyper_dmabuf_send_request(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(id), req, true);

 	kfree(req);

@@ -528,14 +544,14 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
-						HYPER_DMABUF_OPS_ATTACH);
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
+						HYPER_DMABUF_OPS_ATTACH);

 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+		return ret;
 	}

-	/* Ignoring ret for now */
 	return 0;
 }

@@ -549,8 +565,8 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attac

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
-						HYPER_DMABUF_OPS_DETACH);
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
+						HYPER_DMABUF_OPS_DETACH);

 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);

@@ -583,7 +599,7 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
 		goto err_free_sg;
 	}

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_MAP);

 	if (ret < 0) {
@@ -615,7 +631,7 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
 	sg_free_table(sg);
 	kfree(sg);

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_UNMAP);

 	if (ret < 0) {
@@ -633,7 +649,7 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dmabuf)

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_RELEASE);

 	if (ret < 0) {
@@ -651,7 +667,7 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_da

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -670,7 +686,7 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_END_CPU_ACCESS);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -689,7 +705,7 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_KMAP_ATOMIC);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -708,7 +724,7 @@ static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -725,7 +741,7 @@ static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_KMAP);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -744,7 +760,7 @@ static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_KUNMAP);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -761,7 +777,7 @@ static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_MMAP);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -780,7 +796,7 @@ static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_VMAP);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -799,7 +815,7 @@ static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)

 	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;

-	ret = hyper_dmabuf_sync_request_and_wait(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(sgt_info->hyper_dmabuf_id),
+	ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
 						HYPER_DMABUF_OPS_VUNMAP);
 	if (ret < 0) {
 		printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 44a153b..bace8b2 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -6,6 +6,7 @@
 #include <…>
 #include <…>
 #include <…>
+#include <…>
 #include "hyper_dmabuf_struct.h"
 #include "hyper_dmabuf_imp.h"
 #include "hyper_dmabuf_list.h"
@@ -121,7 +122,9 @@ static int hyper_dmabuf_export_remote(void *data)
 		return -1;
 	}

-	/* Clear ret, as that will cause whole ioctl to return failure to userspace, which is not true */
+	/* Clear ret, as that will cause whole ioctl to return failure
+	 * to userspace, which is not true
+	 */
 	ret = 0;

 	sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);

@@ -131,10 +134,26 @@ static int hyper_dmabuf_export_remote(void *data)
 	sgt_info->hyper_dmabuf_id = hyper_dmabuf_id_gen();
 	/* TODO: We might need to consider using port number on event channel? */
 	sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
-	sgt_info->sgt = sgt;
-	sgt_info->attachment = attachment;
 	sgt_info->dma_buf = dma_buf;

+	sgt_info->active_sgts = kcalloc(1, sizeof(struct sgt_list), GFP_KERNEL);
+	sgt_info->active_attached = kcalloc(1, sizeof(struct attachment_list), GFP_KERNEL);
+	sgt_info->va_kmapped = kcalloc(1, sizeof(struct kmap_vaddr_list), GFP_KERNEL);
+	sgt_info->va_vmapped = kcalloc(1, sizeof(struct vmap_vaddr_list), GFP_KERNEL);
+
+	sgt_info->active_sgts->sgt = sgt;
+	sgt_info->active_attached->attach = attachment;
+	sgt_info->va_kmapped->vaddr = NULL; /* first vaddr is NULL */
+	sgt_info->va_vmapped->vaddr = NULL; /* first vaddr is NULL */
+
+	/* initialize list of sgt, attachment and vaddr for dmabuf sync
+	 * via shadow dma-buf
+	 */
+	INIT_LIST_HEAD(&sgt_info->active_sgts->list);
+	INIT_LIST_HEAD(&sgt_info->active_attached->list);
+	INIT_LIST_HEAD(&sgt_info->va_kmapped->list);
+	INIT_LIST_HEAD(&sgt_info->va_vmapped->list);
+
 	page_info = hyper_dmabuf_ext_pgs(sgt);
 	if (page_info == NULL)
 		goto fail_export;
@@ -155,7 +174,7 @@ static int hyper_dmabuf_export_remote(void *data)
 	operands[2] = page_info->frst_ofst;
 	operands[3] = page_info->last_len;
 	operands[4] = hyper_dmabuf_create_gref_table(page_info->pages, export_remote_attr->remote_domain,
-						page_info->nents, &sgt_info->shared_pages_info);
+						     page_info->nents, &sgt_info->shared_pages_info);
 	/* driver/application specific private info, max 32 bytes */
 	operands[5] = export_remote_attr->private[0];
 	operands[6] = export_remote_attr->private[1];
@@ -166,7 +185,7 @@ static int hyper_dmabuf_export_remote(void *data)
 	/* composing a message to the importer */
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);

-	if(hyper_dmabuf_send_request(export_remote_attr->remote_domain, req))
+	if(hyper_dmabuf_send_request(export_remote_attr->remote_domain, req, false))
 		goto fail_send_request;

 	/* free msg */
@@ -181,10 +200,17 @@ static int hyper_dmabuf_export_remote(void *data)
 	hyper_dmabuf_remove_exported(sgt_info->hyper_dmabuf_id);

 fail_export:
-	dma_buf_unmap_attachment(sgt_info->attachment, sgt_info->sgt, DMA_BIDIRECTIONAL);
-	dma_buf_detach(sgt_info->dma_buf, sgt_info->attachment);
+	dma_buf_unmap_attachment(sgt_info->active_attached->attach,
+				 sgt_info->active_sgts->sgt,
+				 DMA_BIDIRECTIONAL);
+	dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
 	dma_buf_put(sgt_info->dma_buf);

+	kfree(sgt_info->active_attached);
+	kfree(sgt_info->active_sgts);
+	kfree(sgt_info->va_kmapped);
+	kfree(sgt_info->va_vmapped);
+
 	return -EINVAL;
 }

@@ -233,7 +259,8 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
 }

 /* removing dmabuf from the database and send int req to the source domain
-* to unmap it. */
+ * to unmap it.
+ */
 static int hyper_dmabuf_destroy(void *data)
 {
 	struct ioctl_hyper_dmabuf_destroy *destroy_attr;
@@ -250,7 +277,9 @@ static int hyper_dmabuf_destroy(void *data)

 	/* find dmabuf in export list */
 	sgt_info = hyper_dmabuf_find_exported(destroy_attr->hyper_dmabuf_id);
-	if (sgt_info == NULL) { /* failed to find corresponding entry in export list */
+
+	/* failed to find corresponding entry in export list */
+	if (sgt_info == NULL) {
 		destroy_attr->status = -EINVAL;
 		return -EFAULT;
 	}
@@ -260,8 +289,9 @@ static int hyper_dmabuf_destroy(void *data)
 	hyper_dmabuf_create_request(req, HYPER_DMABUF_DESTROY, &destroy_attr->hyper_dmabuf_id);

 	/* now send destroy request to remote domain
-	 * currently assuming there's only one importer exist */
-	ret = hyper_dmabuf_send_request(sgt_info->hyper_dmabuf_rdomain, req);
+	 * currently assuming there's only one importer exist
+	 */
+	ret = hyper_dmabuf_send_request(sgt_info->hyper_dmabuf_rdomain, req, true);
 	if (ret < 0) {
 		kfree(req);
 		return -EFAULT;

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
index ad2109c..2b3ef6b 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c
@@ -33,7 +33,7 @@ int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info)
 	info_entry->info = info;

 	hash_add(hyper_dmabuf_hash_exported, &info_entry->node,
-		info_entry->info->hyper_dmabuf_id);
+		 info_entry->info->hyper_dmabuf_id);

 	return 0;
 }
@@ -47,7 +47,7 @@ int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info)
 	info_entry->info = info;

 	hash_add(hyper_dmabuf_hash_imported, &info_entry->node,
-		info_entry->info->hyper_dmabuf_id);
+		 info_entry->info->hyper_dmabuf_id);

 	return 0;
 }
@@ -71,8 +71,8 @@ int hyper_dmabuf_find_id(struct dma_buf *dmabuf, int domid)
 	int bkt;

 	hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
-		if(info_entry->info->attachment->dmabuf == dmabuf &&
-		   info_entry->info->hyper_dmabuf_rdomain == domid)
+		if(info_entry->info->dma_buf == dmabuf &&
+		   info_entry->info->hyper_dmabuf_rdomain == domid)
 			return info_entry->info->hyper_dmabuf_id;

 	return -1;

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index 8a059c8..2432a4e 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -7,7 +7,7 @@
 #include <…>
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_imp.h"
-//#include "hyper_dmabuf_remote_sync.h"
+#include "hyper_dmabuf_remote_sync.h"
 #include "xen/hyper_dmabuf_xen_comm.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
@@ -125,7 +125,9 @@ void cmd_process_work(struct work_struct *work)
 		 * operands0 : hyper_dmabuf_id
 		 */

-		/* TODO: that should be done on workqueue, when received ack from all importers that buffer is no longer used */
+		/* TODO: that should be done on workqueue, when received ack from
+		 * all importers that buffer is no longer used
+		 */

 		sgt_info = hyper_dmabuf_find_exported(req->operands[0]);

 		if (sgt_info) {
 			hyper_dmabuf_cleanup_gref_table(sgt_info);

 			/* unmap dmabuf */
-			dma_buf_unmap_attachment(sgt_info->attachment, sgt_info->sgt, DMA_BIDIRECTIONAL);
-			dma_buf_detach(sgt_info->dma_buf, sgt_info->attachment);
+			dma_buf_unmap_attachment(sgt_info->active_attached->attach,
+						 sgt_info->active_sgts->sgt,
+						 DMA_BIDIRECTIONAL);
+			dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
 			dma_buf_put(sgt_info->dma_buf);
 			/* TODO: Rest of cleanup, sgt cleanup etc */

@@ -147,16 +151,6 @@ void cmd_process_work(struct work_struct *work)
 		/* for dmabuf synchronization */
 		break;

-	/* as importer, command to exporter */
-	case HYPER_DMABUF_OPS_TO_SOURCE:
-		/* notifying dmabuf map/unmap to exporter, map will make the driver to do shadow mapping
-		 * or unmapping for synchronization with original exporter (e.g. i915) */
-		/* command : DMABUF_OPS_TO_SOURCE.
-		 * operands0 : hyper_dmabuf_id
-		 * operands1 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
-		 */
-		break;
-
 	default:
 		/* shouldn't get here */
 		/* no matched command, nothing to do.. just return error */
@@ -172,6 +166,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
 	struct cmd_process *proc;
 	struct hyper_dmabuf_ring_rq *temp_req;
 	struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+	int ret;

 	if (!req) {
 		printk("request is NULL\n");
@@ -216,7 +211,25 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
 		return req->command;
 	}

-	temp_req = (struct hyper_dmabuf_ring_rq *)kmalloc(sizeof(*temp_req), GFP_KERNEL);
+	/* dma buf remote synchronization */
+	if (req->command == HYPER_DMABUF_OPS_TO_SOURCE) {
+		/* notifying dmabuf map/unmap to exporter, map will make the driver to do shadow mapping
+		 * or unmapping for synchronization with original exporter (e.g. i915) */
+
+		/* command : DMABUF_OPS_TO_SOURCE.
+		 * operands0 : hyper_dmabuf_id
+		 * operands1 : enum hyper_dmabuf_ops {....}
+		 */
+		ret = hyper_dmabuf_remote_sync(req->operands[0], req->operands[1]);
+		if (ret)
+			req->status = HYPER_DMABUF_REQ_ERROR;
+		else
+			req->status = HYPER_DMABUF_REQ_PROCESSED;
+
+		return req->command;
+	}
+
+	temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);

 	memcpy(temp_req, req, sizeof(*temp_req));

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
new file mode 100644
index 0000000..6ba932f
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -0,0 +1,189 @@
+#include <…>
+#include <…>
+#include <…>
+#include <…>
+#include <…>
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_drv.h"
+#include "xen/hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_msg.h"
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+int hyper_dmabuf_remote_sync(int id, int ops)
+{
+	struct hyper_dmabuf_sgt_info *sgt_info;
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	int ret;
+
+	/* find a coresponding SGT for the id */
+	sgt_info = hyper_dmabuf_find_exported(id);
+
+	if (!sgt_info) {
+		printk("dmabuf remote sync::can't find exported list\n");
+		return -EINVAL;
+	}
+
+	switch (ops) {
+	case HYPER_DMABUF_OPS_ATTACH:
+		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
+
+		attachl->attach = dma_buf_attach(sgt_info->dma_buf,
+						 hyper_dmabuf_private.device);
+
+		if (!attachl->attach) {
+			kfree(attachl);
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
+			return -EINVAL;
+		}
+
+		list_add(&attachl->list, &sgt_info->active_attached->list);
+		break;
+
+	case HYPER_DMABUF_OPS_DETACH:
+		attachl = list_first_entry(&sgt_info->active_attached->list,
+					   struct attachment_list, list);
+
+		if (!attachl) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_DETACH\n");
+			return -EINVAL;
+		}
+		dma_buf_detach(sgt_info->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+		break;
+
+	case HYPER_DMABUF_OPS_MAP:
+		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+		attachl = list_first_entry(&sgt_info->active_attached->list,
+					   struct attachment_list, list);
+		sgtl->sgt = dma_buf_map_attachment(attachl->attach, DMA_BIDIRECTIONAL);
+		if (!sgtl->sgt) {
+			kfree(sgtl);
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+			return -EINVAL;
+		}
+		list_add(&sgtl->list, &sgt_info->active_sgts->list);
+		break;
+
+	case HYPER_DMABUF_OPS_UNMAP:
+		attachl = list_first_entry(&sgt_info->active_attached->list,
+					   struct attachment_list, list);
+		sgtl = list_first_entry(&sgt_info->active_sgts->list,
+					struct sgt_list, list);
+		if (!attachl || !sgtl) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_UNMAP\n");
+			return -EINVAL;
+		}
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+		break;
+
+	case HYPER_DMABUF_OPS_RELEASE:
+		/* remote importer shouldn't release dma_buf because
+		 * exporter will hold handle to the dma_buf as
+		 * far as dma_buf is shared with other domains.
+		 */
+		break;
+
+	case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
+		ret = dma_buf_begin_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
+		if (!ret) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+			ret = -EINVAL;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_END_CPU_ACCESS:
+		ret = dma_buf_end_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
+		if (!ret) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+			ret = -EINVAL;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_KMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KMAP:
+		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
+
+		/* dummy kmapping of 1 page */
+		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
+			va_kmapl->vaddr = dma_buf_kmap_atomic(sgt_info->dma_buf, 1);
+		else
+			va_kmapl->vaddr = dma_buf_kmap(sgt_info->dma_buf, 1);
+
+		if (!va_kmapl->vaddr) {
+			kfree(va_kmapl);
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			return -EINVAL;
+		}
+		list_add(&va_kmapl->list, &sgt_info->va_kmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KUNMAP:
+		va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+		if (!va_kmapl || va_kmapl->vaddr == NULL) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			return -EINVAL;
+		}
+
+		/* unmapping 1 page */
+		if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
+			dma_buf_kunmap_atomic(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+		else
+			dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+		break;
+
+	case HYPER_DMABUF_OPS_MMAP:
+		/* currently not supported: looking for a way to create
+		 * a dummy vma */
+		printk("dmabuf remote sync::sychronized mmap is not supported\n");
+		break;
+
+	case HYPER_DMABUF_OPS_VMAP:
+		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
+
+		/* dummy vmapping */
+		va_vmapl->vaddr = dma_buf_vmap(sgt_info->dma_buf);
+
+		if (!va_vmapl->vaddr) {
+			kfree(va_vmapl);
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
+			return -EINVAL;
+		}
+		list_add(&va_vmapl->list, &sgt_info->va_vmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_VUNMAP:
+		va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+		if (!va_vmapl || va_vmapl->vaddr == NULL) {
+			printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
+			return -EINVAL;
+		}
+
+		dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+		break;
+
+	default:
+		/* program should not get here */
+		break;
+	}
+
+	return 0;
+}

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h
new file mode 100644
index 0000000..fc85fa8
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h
@@ -0,0 +1,6 @@
+#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__
+#define __HYPER_DMABUF_REMOTE_SYNC_H__
+
+int hyper_dmabuf_remote_sync(int id, int ops);
+
+#endif // __HYPER_DMABUF_REMOTE_SYNC_H__

diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
index c8a2f4d..bfe80ee 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h
@@ -18,6 +18,30 @@
  * frame buffer) */
 #define MAX_ALLOWED_NUM_PAGES_FOR_GREF_NUM_ARRAYS 4

+/* stack of mapped sgts */
+struct sgt_list {
+	struct sg_table *sgt;
+	struct list_head list;
+};
+
+/* stack of attachments */
+struct attachment_list {
+	struct dma_buf_attachment *attach;
+	struct list_head list;
+};
+
+/* stack of vaddr mapped via kmap */
+struct kmap_vaddr_list {
+	void *vaddr;
+	struct list_head list;
+};
+
+/* stack of vaddr mapped via vmap */
+struct vmap_vaddr_list {
+	void *vaddr;
+	struct list_head list;
+};
+
 struct hyper_dmabuf_shared_pages_info {
 	grant_ref_t *data_refs;	/* table with shared buffer pages refid */
 	grant_ref_t *addr_pages; /* pages of 2nd level addressing */
@@ -46,9 +70,13 @@ struct hyper_dmabuf_pages_info {
 struct hyper_dmabuf_sgt_info {
 	int hyper_dmabuf_id; /* unique id to reference dmabuf in remote domain */
 	int hyper_dmabuf_rdomain; /* domain importing this sgt */
-	struct sg_table *sgt; /* pointer to sgt */
+
 	struct dma_buf *dma_buf; /* needed to store this for freeing it later */
-	struct dma_buf_attachment *attachment; /* needed to store this for freeing this later */
+	struct sgt_list *active_sgts;
+	struct attachment_list *active_attached;
+	struct kmap_vaddr_list *va_kmapped;
+	struct vmap_vaddr_list *va_vmapped;
+
 	struct hyper_dmabuf_shared_pages_info shared_pages_info;
 	int private[4]; /* device specific info (e.g. image's meta info?) */
 };

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
index 5db58b0..576085f 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c
@@ -3,6 +3,7 @@
 #include <…>
 #include <…>
 #include <…>
+#include <…>
 #include <…>
 #include <…>
 #include <…>
@@ -15,6 +16,8 @@

 static int export_req_id = 0;

+struct hyper_dmabuf_ring_rq req_pending = {0};
+
 /* Creates entry in xen store that will keep details of all exporter rings created by this domain */
 int32_t hyper_dmabuf_setup_data_dir()
 {
@@ -114,8 +117,8 @@ int hyper_dmabuf_next_req_id_export(void)
 }

 /* For now cache latast rings as global variables TODO: keep them in list*/
-static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *dev_id);
-static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *dev_id);
+static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info);
+static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info);

 /*
  * Callback function that will be called on any change of xenbus path being watched.
@@ -376,12 +379,13 @@ void hyper_dmabuf_cleanup_ringbufs(void)
 	hyper_dmabuf_foreach_importer_ring(hyper_dmabuf_importer_ringbuf_cleanup);
 }

-int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req)
+int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req, int wait)
 {
 	struct hyper_dmabuf_front_ring *ring;
 	struct hyper_dmabuf_ring_rq *new_req;
 	struct hyper_dmabuf_ring_info_export *ring_info;
 	int notify;
+	int timeout = 1000;

 	/* find a ring info for the channel */
 	ring_info = hyper_dmabuf_find_exporter_ring(domain);
@@ -401,6 +405,10 @@ int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req)
 		return -EIO;
 	}

+	/* update req_pending with current request */
+	memcpy(&req_pending, req, sizeof(req_pending));
+
+	/* pass current request to the ring */
 	memcpy(new_req, req, sizeof(*new_req));

 	ring->req_prod_pvt++;
@@ -410,10 +418,24 @@ int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req)
 		notify_remote_via_irq(ring_info->irq);
 	}

+	if (wait) {
+		while (timeout--) {
+			if (req_pending.status !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0) {
+			printk("request timed-out\n");
+			return -EBUSY;
+		}
+	}
+
 	return 0;
 }

-/* ISR for request from exporter (as an importer) */
+/* ISR for handling request */
 static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
 {
 	RING_IDX rc, rp;
@@ -444,6 +466,9 @@ static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
 			ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);

 			if (ret > 0) {
+				/* preparing a response for the request and send it to
+				 * the requester
+				 */
 				memcpy(&resp, &req, sizeof(resp));
 				memcpy(RING_GET_RESPONSE(ring, ring->rsp_prod_pvt), &resp,
 				       sizeof(resp));
@@ -465,7 +490,7 @@ static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
 	return IRQ_HANDLED;
 }

-/* ISR for responses from importer */
+/* ISR for handling responses */
 static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info)
 {
 	/* front ring only care about response from back */
@@ -483,10 +508,13 @@ static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info)
 		more_to_do = 0;
 		rp = ring->sring->rsp_prod;
 		for (i = ring->rsp_cons; i != rp; i++) {
-			unsigned long id;
-			resp = RING_GET_RESPONSE(ring, i);
-			id = resp->response_id;
+			resp = RING_GET_RESPONSE(ring, i);
+
+			/* update pending request's status with what is
+			 * in the response
+			 */
+			if (req_pending.request_id == resp->response_id)
+				req_pending.status = resp->status;

 			if (resp->status == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
 				/* parsing response */
@@ -496,6 +524,14 @@ static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info)
 				if (ret < 0) {
 					printk("getting error while parsing response\n");
 				}
+			} else if (resp->status == HYPER_DMABUF_REQ_PROCESSED) {
+				/* for debugging dma_buf remote synchronization */
+				printk("original request = 0x%x\n", resp->command);
+				printk("Just got HYPER_DMABUF_REQ_PROCESSED\n");
+			} else if (resp->status == HYPER_DMABUF_REQ_ERROR) {
+				/* for debugging dma_buf remote synchronization */
+				printk("original request = 0x%x\n", resp->command);
+				printk("Just got HYPER_DMABUF_REQ_ERROR\n");
 			}
 		}

diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
index a4819ca..4ab031a 100644
--- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
+++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -61,7 +61,7 @@ void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain);
 void hyper_dmabuf_cleanup_ringbufs(void);
 /* send request to the remote domain */
-int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req);
+int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req, int wait);

 /* called by interrupt (WORKQUEUE) */
 int hyper_dmabuf_send_response(struct hyper_dmabuf_ring_rp* response, int domain);
-- 
2.7.4
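
P.S. for reviewers: the heart of this patch is the new wait path in
hyper_dmabuf_send_request(). The stand-alone user-space sketch below is
illustrative only and not part of the patch; it models the polling
handshake the driver now performs (sender publishes a request, the
response path flips req_pending.status, the sender polls until the
status changes or ~1000 * 100us elapse). A pthread stands in for the
remote domain and event channel, and a volatile flag stands in for the
shared ring; names mirror the patch but everything else is a toy.

/* build: cc sketch.c -pthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { REQ_NOT_RESPONDED, REQ_PROCESSED, REQ_ERROR };

struct ring_rq {
	int request_id;
	volatile int status;	/* toy stand-in for the shared ring slot */
};

static struct ring_rq req_pending;

/* models the remote domain's ISR: do the shadow op, then respond */
static void *responder(void *arg)
{
	usleep(5000);			/* remote side replays the dma_buf op */
	req_pending.status = REQ_PROCESSED;
	return NULL;
}

static int send_request_and_wait(int id)
{
	int timeout = 1000;		/* same budget as the patch */
	pthread_t t;

	req_pending.request_id = id;
	req_pending.status = REQ_NOT_RESPONDED;

	/* in the driver this is the ring push + notify_remote_via_irq() */
	pthread_create(&t, NULL, responder, NULL);

	/* the patch's wait loop: poll status, sleeping ~100us per try */
	while (timeout--) {
		if (req_pending.status != REQ_NOT_RESPONDED)
			break;
		usleep(100);		/* usleep_range(100, 120) in-kernel */
	}
	pthread_join(t, NULL);

	if (timeout < 0) {		/* loop exhausted without a response */
		printf("request timed-out\n");
		return -1;
	}
	return req_pending.status == REQ_PROCESSED ? 0 : -1;
}

int main(void)
{
	printf("sync request returned %d\n", send_request_and_wait(1));
	return 0;
}

Note that, as in the sketch, the driver keeps a single global
req_pending that the front-ring ISR updates, so only one synchronous
request can be outstanding at a time in this RFC.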