Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1753163AbdLSTu7 (ORCPT ); Tue, 19 Dec 2017 14:50:59 -0500 Received: from mga01.intel.com ([192.55.52.88]:17630 "EHLO mga01.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752840AbdLSTgc (ORCPT ); Tue, 19 Dec 2017 14:36:32 -0500 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.45,428,1508828400"; d="scan'208";a="4018495" From: Dongwon Kim To: linux-kernel@vger.kernel.org Cc: dri-devel@lists.freedesktop.org, xen-devel@lists.xenproject.org, mateuszx.potrola@intel.com, dongwon.kim@intel.com Subject: [RFC PATCH 08/60] hyper_dmabuf: automatic comm channel initialization using xenstore Date: Tue, 19 Dec 2017 11:29:24 -0800 Message-Id: <1513711816-2618-8-git-send-email-dongwon.kim@intel.com> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1513711816-2618-1-git-send-email-dongwon.kim@intel.com> References: <1513711816-2618-1-git-send-email-dongwon.kim@intel.com> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 24194 Lines: 681 From: Mateusz Polrola This introduces use of xenstore for creating and managing communication channels between two VMs in the system. When hyper_dmabuf driver is loaded in the service VM (host OS), a new xenstore directory, "/local/domain//data/hyper_dmabuf" is created in xenstore filesystem. Whenever a new guest OS creates and initializes its own upstream channel to the service VM, a new directory is created under the main directory created above as shown here: /local/domain//data/hyper_dmabuf//port /local/domain//data/hyper_dmabuf//gref This patch also adds a "xenstore watch" callback that is called when a new upstream connection is made from another VM (VM-b). 
Upon detection, this VM (VM-a) initializes a downstream channel, paired with detected upstream connection as shown below. VM-a (downstream) <----- (upstream) VM-b And as soon as this downstream channel is created, a new upstream channel from VM-a to VM-b is automatically created and initialized via "xenstore watch" call back on VM-b. VM-a (upstream) <----- (downstream) VM-b As a result, there will be bi-directional communication channel available between two VMs. When upstream channel is removed (e.g. unloading driver), VM on the other side is notified and "xenstore watch" callback is invoked. Via this callback, VM can remove corresponding downstream channel. Signed-off-by: Dongwon Kim --- drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c | 11 +- drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h | 14 -- drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 30 +-- drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c | 31 +-- drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h | 2 - .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 226 +++++++++++++++++++-- .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h | 18 +- .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c | 22 ++ .../hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h | 6 + 9 files changed, 270 insertions(+), 90 deletions(-) diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c index 70b4878..5b5dae44 100644 --- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c +++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c @@ -6,6 +6,7 @@ #include "hyper_dmabuf_conf.h" #include "hyper_dmabuf_list.h" #include "xen/hyper_dmabuf_xen_comm_list.h" +#include "xen/hyper_dmabuf_xen_comm.h" MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("IOTG-PED, INTEL"); @@ -43,6 +44,11 @@ static int hyper_dmabuf_drv_init(void) return -EINVAL; } + ret = hyper_dmabuf_setup_data_dir(); + if (ret < 0) { + return -EINVAL; + } + /* interrupt for comm should be registered here: */ return ret; } @@ -52,12 +58,15 @@ static void 
hyper_dmabuf_drv_exit(void) { /* hash tables for export/import entries and ring_infos */ hyper_dmabuf_table_destroy(); - hyper_dmabuf_ring_table_init(); + + hyper_dmabuf_cleanup_ringbufs(); + hyper_dmabuf_ring_table_destroy(); /* destroy workqueue */ if (hyper_dmabuf_private.work_queue) destroy_workqueue(hyper_dmabuf_private.work_queue); + hyper_dmabuf_destroy_data_dir(); printk( KERN_NOTICE "dma_buf-src_sink model: Exiting" ); unregister_device(); } diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h index 6145d29..7511afb 100644 --- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h +++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h @@ -29,8 +29,6 @@ struct ioctl_hyper_dmabuf_exporter_ring_setup { /* IN parameters */ /* Remote domain id */ uint32_t remote_domain; - grant_ref_t ring_refid; /* assigned by driver, copied to userspace after initialization */ - uint32_t port; /* assigned by driver, copied to userspace after initialization */ }; #define IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP \ @@ -39,10 +37,6 @@ struct ioctl_hyper_dmabuf_importer_ring_setup { /* IN parameters */ /* Source domain id */ uint32_t source_domain; - /* Ring shared page refid */ - grant_ref_t ring_refid; - /* Port number */ - uint32_t port; }; #define IOCTL_HYPER_DMABUF_EXPORT_REMOTE \ @@ -95,12 +89,4 @@ struct ioctl_hyper_dmabuf_query { uint32_t info; }; -#define IOCTL_HYPER_DMABUF_REMOTE_EXPORTER_RING_SETUP \ -_IOC(_IOC_NONE, 'G', 6, sizeof(struct ioctl_hyper_dmabuf_remote_exporter_ring_setup)) -struct ioctl_hyper_dmabuf_remote_exporter_ring_setup { - /* in parameters */ - uint32_t rdomain; /* id of remote domain where exporter's ring need to be setup */ - uint32_t info; -}; - #endif //__LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c index e4d8316..44a153b 100644 --- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c +++ 
b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c @@ -48,9 +48,7 @@ static int hyper_dmabuf_exporter_ring_setup(void *data) return 0; } - ret = hyper_dmabuf_exporter_ringbuf_init(ring_attr->remote_domain, - &ring_attr->ring_refid, - &ring_attr->port); + ret = hyper_dmabuf_exporter_ringbuf_init(ring_attr->remote_domain); return ret; } @@ -76,10 +74,7 @@ static int hyper_dmabuf_importer_ring_setup(void *data) return 0; } - /* user need to provide a port number and ref # for the page used as ring buffer */ - ret = hyper_dmabuf_importer_ringbuf_init(setup_imp_ring_attr->source_domain, - setup_imp_ring_attr->ring_refid, - setup_imp_ring_attr->port); + ret = hyper_dmabuf_importer_ringbuf_init(setup_imp_ring_attr->source_domain); return ret; } @@ -355,26 +350,6 @@ static int hyper_dmabuf_query(void *data) return ret; } -static int hyper_dmabuf_remote_exporter_ring_setup(void *data) -{ - struct ioctl_hyper_dmabuf_remote_exporter_ring_setup *remote_exporter_ring_setup; - struct hyper_dmabuf_ring_rq *req; - - remote_exporter_ring_setup = (struct ioctl_hyper_dmabuf_remote_exporter_ring_setup *)data; - - req = kcalloc(1, sizeof(*req), GFP_KERNEL); - hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORTER_RING_SETUP, NULL); - - /* requesting remote domain to set-up exporter's ring */ - if(hyper_dmabuf_send_request(remote_exporter_ring_setup->rdomain, req) < 0) { - kfree(req); - return -EINVAL; - } - - kfree(req); - return 0; -} - static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORTER_RING_SETUP, hyper_dmabuf_exporter_ring_setup, 0), HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP, hyper_dmabuf_importer_ring_setup, 0), @@ -382,7 +357,6 @@ static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, hyper_dmabuf_export_fd_ioctl, 0), HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_DESTROY, hyper_dmabuf_destroy, 0), 
HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, hyper_dmabuf_query, 0), - HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_REMOTE_EXPORTER_RING_SETUP, hyper_dmabuf_remote_exporter_ring_setup, 0), }; static long hyper_dmabuf_ioctl(struct file *filp, diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c index 0166e61..8a059c8 100644 --- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c +++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c @@ -70,12 +70,6 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request, request->operands[i] = operands[i]; break; - /* requesting the other side to setup another ring channel for reverse direction */ - case HYPER_DMABUF_EXPORTER_RING_SETUP: - /* command : HYPER_DMABUF_EXPORTER_RING_SETUP */ - /* no operands needed */ - break; - default: /* no command found */ return; @@ -163,13 +157,6 @@ void cmd_process_work(struct work_struct *work) */ break; - case HYPER_DMABUF_IMPORTER_RING_SETUP: - /* command: HYPER_DMABUF_IMPORTER_RING_SETUP */ - /* no operands needed */ - hyper_dmabuf_importer_ringbuf_init(domid, req->operands[0], req->operands[1]); - - break; - default: /* shouldn't get here */ /* no matched command, nothing to do.. 
just return error */ @@ -185,7 +172,6 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req) struct cmd_process *proc; struct hyper_dmabuf_ring_rq *temp_req; struct hyper_dmabuf_imported_sgt_info *imported_sgt_info; - int ret; if (!req) { printk("request is NULL\n"); @@ -193,28 +179,13 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req) } if ((req->command < HYPER_DMABUF_EXPORT) || - (req->command > HYPER_DMABUF_IMPORTER_RING_SETUP)) { + (req->command > HYPER_DMABUF_OPS_TO_SOURCE)) { printk("invalid command\n"); return -EINVAL; } req->status = HYPER_DMABUF_REQ_PROCESSED; - /* HYPER_DMABUF_EXPORTER_RING_SETUP requires immediate - * follow up so can't be processed in workqueue - */ - if (req->command == HYPER_DMABUF_EXPORTER_RING_SETUP) { - ret = hyper_dmabuf_exporter_ringbuf_init(domid, &req->operands[0], &req->operands[1]); - if (ret < 0) { - req->status = HYPER_DMABUF_REQ_ERROR; - } - - req->status = HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP; - req->command = HYPER_DMABUF_IMPORTER_RING_SETUP; - - return req->command; - } - /* HYPER_DMABUF_DESTROY requires immediate * follow up so can't be processed in workqueue */ diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h index 44bfb70..9b25bdb 100644 --- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h +++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h @@ -7,8 +7,6 @@ enum hyper_dmabuf_command { HYPER_DMABUF_DESTROY_FINISH, HYPER_DMABUF_OPS_TO_REMOTE, HYPER_DMABUF_OPS_TO_SOURCE, - HYPER_DMABUF_EXPORTER_RING_SETUP, /* requesting remote domain to set up exporter's ring */ - HYPER_DMABUF_IMPORTER_RING_SETUP, /* requesting remote domain to set up importer's ring */ }; enum hyper_dmabuf_ops { diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c index 05855ba1..5db58b0 100644 --- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c +++ 
b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c @@ -15,6 +15,83 @@ static int export_req_id = 0; +/* Creates entry in xen store that will keep details of all exporter rings created by this domain */ +int32_t hyper_dmabuf_setup_data_dir() +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_get_domid()); + return xenbus_mkdir(XBT_NIL, buf, ""); +} + + +/* Removes entry from xenstore with exporter ring details. + * Other domains that has connected to any of exporter rings created by this domain, + * will be notified about removal of this entry and will treat that as signal to + * cleanup importer rings created for this domain + */ +int32_t hyper_dmabuf_destroy_data_dir() +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", hyper_dmabuf_get_domid()); + return xenbus_rm(XBT_NIL, buf, ""); +} + +/* + * Adds xenstore entries with details of exporter ring created for given remote domain. + * It requires special daemon running in dom0 to make sure that given remote domain will + * have right permissions to access that data. + */ +static int32_t hyper_dmabuf_expose_ring_details(uint32_t domid, uint32_t rdomid, uint32_t grefid, uint32_t port) +{ + char buf[255]; + int ret; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", domid, rdomid); + ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", grefid); + + if (ret) { + printk("Failed to write xenbus entry %s: %d\n", buf, ret); + return ret; + } + + ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port); + + if (ret) { + printk("Failed to write xenbus entry %s: %d\n", buf, ret); + return ret; + } + + return 0; +} + +/* + * Queries details of ring exposed by remote domain. 
+ */ +static int32_t hyper_dmabuf_get_ring_details(uint32_t domid, uint32_t rdomid, uint32_t *grefid, uint32_t *port) +{ + char buf[255]; + int ret; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d", rdomid, domid); + ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid); + + if (ret <= 0) { + printk("Failed to read xenbus entry %s: %d\n", buf, ret); + return ret; + } + + ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port); + + if (ret <= 0) { + printk("Failed to read xenbus entry %s: %d\n", buf, ret); + return ret; + } + + return (ret <= 0 ? 1 : 0); +} + int32_t hyper_dmabuf_get_domid(void) { struct xenbus_transaction xbt; @@ -40,8 +117,49 @@ int hyper_dmabuf_next_req_id_export(void) static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *dev_id); static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *dev_id); +/* + * Callback function that will be called on any change of xenbus path being watched. + * Used for detecting creation/destruction of remote domain exporter ring. + * When remote domain's exporter ring will be detected, importer ring on this domain will be created. + * When remote domain's exporter ring destruction will be detected it will celanup this domain importer ring. + * Destruction can be caused by unloading module by remote domain or it's crash/force shutdown. 
+ */ +static void remote_domain_exporter_watch_cb(struct xenbus_watch *watch, + const char *path, const char *token) +{ + int rdom,ret; + uint32_t grefid, port; + struct hyper_dmabuf_ring_info_import *ring_info; + + /* Check which domain has changed its exporter rings */ + ret = sscanf(watch->node, "/local/domain/%d/", &rdom); + if (ret <= 0) { + return; + } + + /* Check if we have importer ring for given remote domain alrady created */ + ring_info = hyper_dmabuf_find_importer_ring(rdom); + + /* + * Try to query remote domain exporter ring details - if that will fail and we have + * importer ring that means remote domains has cleanup its exporter ring, so our + * importer ring is no longer useful. + * If querying details will succeed and we don't have importer ring, it means that + * remote domain has setup it for us and we should connect to it. + */ + ret = hyper_dmabuf_get_ring_details(hyper_dmabuf_get_domid(), rdom, &grefid, &port); + + if (ring_info && ret != 0) { + printk("Remote exporter closed, cleaninup importer\n"); + hyper_dmabuf_importer_ringbuf_cleanup(rdom); + } else if (!ring_info && ret == 0) { + printk("Registering importer\n"); + hyper_dmabuf_importer_ringbuf_init(rdom); + } +} + /* exporter needs to generated info for page sharing */ -int hyper_dmabuf_exporter_ringbuf_init(int rdomain, grant_ref_t *refid, int *port) +int hyper_dmabuf_exporter_ringbuf_init(int rdomain) { struct hyper_dmabuf_ring_info_export *ring_info; struct hyper_dmabuf_sring *sring; @@ -99,24 +217,58 @@ int hyper_dmabuf_exporter_ringbuf_init(int rdomain, grant_ref_t *refid, int *por ring_info->irq = ret; ring_info->port = alloc_unbound.port; - /* store refid and port numbers for userspace's use */ - *refid = ring_info->gref_ring; - *port = ring_info->port; - printk("%s: allocated eventchannel gref %d port: %d irq: %d\n", __func__, ring_info->gref_ring, ring_info->port, ring_info->irq); - /* register ring info */ ret = hyper_dmabuf_register_exporter_ring(ring_info); + ret = 
hyper_dmabuf_expose_ring_details(hyper_dmabuf_get_domid(), rdomain, + ring_info->gref_ring, ring_info->port); + + /* + * Register watch for remote domain exporter ring. + * When remote domain will setup its exporter ring, we will automatically connect our importer ring to it. + */ + ring_info->watch.callback = remote_domain_exporter_watch_cb; + ring_info->watch.node = (const char*) kmalloc(sizeof(char) * 255, GFP_KERNEL); + sprintf((char*)ring_info->watch.node, "/local/domain/%d/data/hyper_dmabuf/%d/port", rdomain, hyper_dmabuf_get_domid()); + register_xenbus_watch(&ring_info->watch); + return ret; } +/* cleans up exporter ring created for given remote domain */ +void hyper_dmabuf_exporter_ringbuf_cleanup(int rdomain) +{ + struct hyper_dmabuf_ring_info_export *ring_info; + + /* check if we at all have exporter ring for given rdomain */ + ring_info = hyper_dmabuf_find_exporter_ring(rdomain); + + if (!ring_info) { + return; + } + + hyper_dmabuf_remove_exporter_ring(rdomain); + + unregister_xenbus_watch(&ring_info->watch); + kfree(ring_info->watch.node); + + /* No need to close communication channel, will be done by this function */ + unbind_from_irqhandler(ring_info->irq, (void*) ring_info); + + /* No need to free sring page, will be freed by this function when other side will end its access */ + gnttab_end_foreign_access(ring_info->gref_ring, 0, + (unsigned long) ring_info->ring_front.sring); + + kfree(ring_info); +} + /* importer needs to know about shared page and port numbers for ring buffer and event channel */ -int hyper_dmabuf_importer_ringbuf_init(int sdomain, grant_ref_t gref, int port) +int hyper_dmabuf_importer_ringbuf_init(int sdomain) { struct hyper_dmabuf_ring_info_import *ring_info; struct hyper_dmabuf_sring *sring; @@ -124,24 +276,33 @@ int hyper_dmabuf_importer_ringbuf_init(int sdomain, grant_ref_t gref, int port) struct page *shared_ring; struct gnttab_map_grant_ref *ops; - struct gnttab_unmap_grant_ref *unmap_ops; int ret; + int importer_gref, 
importer_port; + + ret = hyper_dmabuf_get_ring_details(hyper_dmabuf_get_domid(), sdomain, + &importer_gref, &importer_port); + + if (ret) { + printk("Domain %d has not created exporter ring for current domain\n", sdomain); + return ret; + } ring_info = (struct hyper_dmabuf_ring_info_import *) kmalloc(sizeof(*ring_info), GFP_KERNEL); ring_info->sdomain = sdomain; - ring_info->evtchn = port; + ring_info->evtchn = importer_port; ops = (struct gnttab_map_grant_ref*)kmalloc(sizeof(*ops), GFP_KERNEL); - unmap_ops = (struct gnttab_unmap_grant_ref*)kmalloc(sizeof(*unmap_ops), GFP_KERNEL); if (gnttab_alloc_pages(1, &shared_ring)) { return -EINVAL; } gnttab_set_map_op(&ops[0], (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)), - GNTMAP_host_map, gref, sdomain); + GNTMAP_host_map, importer_gref, sdomain); + gnttab_set_unmap_op(&ring_info->unmap_op, (unsigned long)pfn_to_kaddr(page_to_pfn(shared_ring)), + GNTMAP_host_map, -1); ret = gnttab_map_refs(ops, NULL, &shared_ring, 1); if (ret < 0) { @@ -152,13 +313,15 @@ int hyper_dmabuf_importer_ringbuf_init(int sdomain, grant_ref_t gref, int port) if (ops[0].status) { printk("Ring mapping failed\n"); return -EINVAL; + } else { + ring_info->unmap_op.handle = ops[0].handle; } sring = (struct hyper_dmabuf_sring*) pfn_to_kaddr(page_to_pfn(shared_ring)); BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE); - ret = bind_interdomain_evtchn_to_irqhandler(sdomain, port, + ret = bind_interdomain_evtchn_to_irqhandler(sdomain, importer_port, hyper_dmabuf_back_ring_isr, 0, NULL, (void*)ring_info); if (ret < 0) { @@ -168,14 +331,51 @@ int hyper_dmabuf_importer_ringbuf_init(int sdomain, grant_ref_t gref, int port) ring_info->irq = ret; printk("%s: bound to eventchannel port: %d irq: %d\n", __func__, - port, + importer_port, ring_info->irq); ret = hyper_dmabuf_register_importer_ring(ring_info); + /* Setup communcation channel in opposite direction */ + if (!hyper_dmabuf_find_exporter_ring(sdomain)) { + ret = 
hyper_dmabuf_exporter_ringbuf_init(sdomain); + } + return ret; } +/* clenas up importer ring create for given source domain */ +void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain) +{ + struct hyper_dmabuf_ring_info_import *ring_info; + struct page *shared_ring; + + /* check if we have importer ring created for given sdomain */ + ring_info = hyper_dmabuf_find_importer_ring(sdomain); + + if (!ring_info) + return; + + hyper_dmabuf_remove_importer_ring(sdomain); + + /* no need to close event channel, will be done by that function */ + unbind_from_irqhandler(ring_info->irq, (void*) ring_info); + + /* unmapping shared ring page */ + shared_ring = virt_to_page(ring_info->ring_back.sring); + gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1); + gnttab_free_pages(1, &shared_ring); + + kfree(ring_info); +} + +/* cleans up all exporter/importer rings */ +void hyper_dmabuf_cleanup_ringbufs(void) +{ + hyper_dmabuf_foreach_exporter_ring(hyper_dmabuf_exporter_ringbuf_cleanup); + hyper_dmabuf_foreach_importer_ring(hyper_dmabuf_importer_ringbuf_cleanup); +} + int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req) { struct hyper_dmabuf_front_ring *ring; diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h index 4ad0529..a4819ca 100644 --- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h +++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h @@ -2,6 +2,7 @@ #define __HYPER_DMABUF_XEN_COMM_H__ #include "xen/interface/io/ring.h" +#include "xen/xenbus.h" #define MAX_NUMBER_OF_OPERANDS 9 @@ -27,6 +28,7 @@ struct hyper_dmabuf_ring_info_export { int gref_ring; int irq; int port; + struct xenbus_watch watch; }; struct hyper_dmabuf_ring_info_import { @@ -34,17 +36,29 @@ struct hyper_dmabuf_ring_info_import { int irq; int evtchn; struct hyper_dmabuf_back_ring ring_back; + struct gnttab_unmap_grant_ref unmap_op; }; int32_t hyper_dmabuf_get_domid(void); +int32_t 
hyper_dmabuf_setup_data_dir(void); +int32_t hyper_dmabuf_destroy_data_dir(void); int hyper_dmabuf_next_req_id_export(void); /* exporter needs to generated info for page sharing */ -int hyper_dmabuf_exporter_ringbuf_init(int rdomain, grant_ref_t *gref, int *port); +int hyper_dmabuf_exporter_ringbuf_init(int rdomain); /* importer needs to know about shared page and port numbers for ring buffer and event channel */ -int hyper_dmabuf_importer_ringbuf_init(int sdomain, grant_ref_t gref, int port); +int hyper_dmabuf_importer_ringbuf_init(int sdomain); + +/* cleans up exporter ring created for given domain */ +void hyper_dmabuf_exporter_ringbuf_cleanup(int rdomain); + +/* cleans up importer ring created for given domain */ +void hyper_dmabuf_importer_ringbuf_cleanup(int sdomain); + +/* cleans up all exporter/importer rings */ +void hyper_dmabuf_cleanup_ringbufs(void); /* send request to the remote domain */ int hyper_dmabuf_send_request(int domain, struct hyper_dmabuf_ring_rq *req); diff --git a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c index 15c9d29..5778468 100644 --- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c +++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c @@ -104,3 +104,25 @@ int hyper_dmabuf_remove_importer_ring(int domid) return -1; } + +void hyper_dmabuf_foreach_exporter_ring(void (*func)(int rdom)) +{ + struct hyper_dmabuf_exporter_ring_info *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_exporter_ring, bkt, tmp, info_entry, node) { + func(info_entry->info->rdomain); + } +} + +void hyper_dmabuf_foreach_importer_ring(void (*func)(int sdom)) +{ + struct hyper_dmabuf_importer_ring_info *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_importer_ring, bkt, tmp, info_entry, node) { + func(info_entry->info->sdomain); + } +} diff --git 
a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h index 5929f99..fd1958c 100644 --- a/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h +++ b/drivers/xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h @@ -32,4 +32,10 @@ int hyper_dmabuf_remove_exporter_ring(int domid); int hyper_dmabuf_remove_importer_ring(int domid); +/* iterates over all exporter rings and calls provided function for each of them */ +void hyper_dmabuf_foreach_exporter_ring(void (*func)(int rdom)); + +/* iterates over all importer rings and calls provided function for each of them */ +void hyper_dmabuf_foreach_importer_ring(void (*func)(int sdom)); + #endif // __HYPER_DMABUF_XEN_COMM_LIST_H__ -- 2.7.4