From: Stefano Stabellini
To: xen-devel@lists.xenproject.org
Cc: linux-kernel@vger.kernel.org, sstabellini@kernel.org,
    Stefano Stabellini, boris.ostrovsky@oracle.com, jgross@suse.com,
    Eric Van Hensbergen, Ron Minnich, Latchesar Ionkov,
    v9fs-developer@lists.sourceforge.net
Subject: [PATCH 4/7] xen/9pfs: connect to the backend
Date: Mon, 6 Mar 2017 12:01:25 -0800
Message-Id: <1488830488-18506-4-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1488830488-18506-1-git-send-email-sstabellini@kernel.org>
References: <1488830488-18506-1-git-send-email-sstabellini@kernel.org>
X-Mailer: git-send-email 1.9.1

Implement functions to handle the xenbus handshake. Upon connection,
allocate the rings according to the protocol specification.

Initialize a work_struct and a wait_queue. The work_struct will be used
to schedule work upon receiving an event channel notification from the
backend. The wait_queue will be used to wait when the ring is full and
we need to send a new request.

Signed-off-by: Stefano Stabellini
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
CC: Eric Van Hensbergen
CC: Ron Minnich
CC: Latchesar Ionkov
CC: v9fs-developer@lists.sourceforge.net
---
 net/9p/trans_xen.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 227 insertions(+)

diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 877dfd0..9f6cf8d 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -17,6 +17,36 @@
 #include
 #include
 
+#define XEN_9PFS_NUM_RINGS 2
+
+/* One per ring, more than one per 9pfs share */
+struct xen_9pfs_dataring {
+	struct xen_9pfs_front_priv *priv;
+
+	struct xen_9pfs_data_intf *intf;
+	grant_ref_t ref;
+	int evtchn;
+	int irq;
+	spinlock_t lock;
+
+	void *bytes;
+	struct xen_9pfs_data ring;
+	wait_queue_head_t wq;
+	struct work_struct work;
+};
+
+/* One per 9pfs share */
+struct xen_9pfs_front_priv {
+	struct list_head list;
+	struct xenbus_device *dev;
+	char *tag;
+	struct p9_client *client;
+
+	int num_rings;
+	struct xen_9pfs_dataring *rings;
+};
+static LIST_HEAD(xen_9pfs_devs);
+
 static int p9_xen_cancel(struct p9_client *client, struct p9_req_t *req)
 {
 	return 0;
@@ -36,6 +66,21 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 	return 0;
 }
 
+static void p9_xen_response(struct work_struct *work)
+{
+}
+
+static irqreturn_t xen_9pfs_front_event_handler(int irq, void *r)
+{
+	struct xen_9pfs_dataring *ring = r;
+	BUG_ON(!ring || !ring->priv->client);
+
+	wake_up_interruptible(&ring->wq);
+	schedule_work(&ring->work);
+
+	return IRQ_HANDLED;
+}
+
 static struct p9_trans_module p9_xen_trans = {
 	.name = "xen",
 	.maxsize = (1 << (XEN_9PFS_RING_ORDER + XEN_PAGE_SHIFT)),
@@ -52,25 +97,207 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
 	{ "" }
 };
 
+static int xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
+{
+	int i, j;
+
+	list_del(&priv->list);
+
+	for (i = 0; i < priv->num_rings; i++) {
+		if (priv->rings[i].intf == NULL)
+			break;
+		if (priv->rings[i].irq > 0)
+			unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
+		if (priv->rings[i].bytes != NULL) {
+			for (j = 0; j < (1 << XEN_9PFS_RING_ORDER); j++)
+				gnttab_end_foreign_access(priv->rings[i].intf->ref[j], 0, 0);
+			free_pages((unsigned long)priv->rings[i].bytes, XEN_9PFS_RING_ORDER);
+		}
+		gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
+		free_page((unsigned long)priv->rings[i].intf);
+	}
+	kfree(priv->rings);
+	kfree(priv);
+
+	return 0;
+}
+
 static int xen_9pfs_front_remove(struct xenbus_device *dev)
 {
+	int ret;
+	struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
+
+	dev_set_drvdata(&dev->dev, NULL);
+	ret = xen_9pfs_front_free(priv);
+	return ret;
+}
+
+static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
+		struct xen_9pfs_dataring *ring)
+{
+	int i;
+	int ret = -ENOMEM;
+
+	init_waitqueue_head(&ring->wq);
+	spin_lock_init(&ring->lock);
+	INIT_WORK(&ring->work, p9_xen_response);
+
+	ring->intf = (struct xen_9pfs_data_intf *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (!ring->intf)
+		goto error;
+	memset(ring->intf, 0, XEN_PAGE_SIZE);
+	ring->bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, XEN_9PFS_RING_ORDER);
+	if (ring->bytes == NULL)
+		goto error;
+	for (i = 0; i < (1 << XEN_9PFS_RING_ORDER); i++)
+		ring->intf->ref[i] = gnttab_grant_foreign_access(dev->otherend_id, pfn_to_gfn(virt_to_pfn((void *)ring->bytes) + i), 0);
+	ring->ref = gnttab_grant_foreign_access(dev->otherend_id, pfn_to_gfn(virt_to_pfn((void *)ring->intf)), 0);
+	ring->ring.in = ring->bytes;
+	ring->ring.out = ring->bytes + XEN_9PFS_RING_SIZE;
+
+	ret = xenbus_alloc_evtchn(dev, &ring->evtchn);
+	if (ret)
+		goto error;
+	ring->irq = bind_evtchn_to_irqhandler(ring->evtchn, xen_9pfs_front_event_handler,
+			0, "xen_9pfs-frontend", ring);
+	if (ring->irq < 0) {
+		xenbus_free_evtchn(dev, ring->evtchn);
+		ret = ring->irq;
+		goto error;
+	}
 	return 0;
+
+error:
+	if (ring->intf != NULL)
+		free_page((unsigned long)ring->intf);
+	if (ring->bytes != NULL)
+		free_pages((unsigned long)ring->bytes, XEN_9PFS_RING_ORDER);
+	return ret;
 }
 
 static int xen_9pfs_front_probe(struct xenbus_device *dev,
 		const struct xenbus_device_id *id)
 {
+	int ret = -EFAULT, i;
+	struct xenbus_transaction xbt;
+	struct xen_9pfs_front_priv *priv = NULL;
+	char *versions;
+	unsigned int max_rings, max_ring_order, len;
+
+	versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
+	if (IS_ERR(versions))
+		return PTR_ERR(versions);
+	if (strcmp(versions, "1")) {
+		kfree(versions);
+		return -EINVAL;
+	}
+	kfree(versions);
+	ret = xenbus_scanf(XBT_NIL, dev->otherend, "max-rings", "%u", &max_rings);
+	if (ret < 0 || max_rings < XEN_9PFS_NUM_RINGS)
+		return -EINVAL;
+	ret = xenbus_scanf(XBT_NIL, dev->otherend, "max-ring-page-order", "%u", &max_ring_order);
+	if (ret < 0 || max_ring_order < XEN_9PFS_RING_ORDER)
+		return -EINVAL;
+
+	priv = kzalloc(sizeof(struct xen_9pfs_front_priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->dev = dev;
+	priv->num_rings = XEN_9PFS_NUM_RINGS;
+	priv->rings = kzalloc(sizeof(struct xen_9pfs_dataring) * priv->num_rings,
+			GFP_KERNEL);
+	if (!priv->rings) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+ again:
+	ret = xenbus_transaction_start(&xbt);
+	if (ret) {
+		xenbus_dev_fatal(dev, ret, "starting transaction");
+		goto error;
+	}
+	ret = xenbus_printf(xbt, dev->nodename, "version", "%u", 1);
+	if (ret)
+		goto error_xenbus;
+	ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u", priv->num_rings);
+	if (ret)
+		goto error_xenbus;
+	for (i = 0; i < priv->num_rings; i++) {
+		char str[16];
+
+		priv->rings[i].priv = priv;
+		ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
+		if (ret < 0)
+			goto error_xenbus;
+
+		sprintf(str, "ring-ref%u", i);
+		ret = xenbus_printf(xbt, dev->nodename, str, "%d", priv->rings[i].ref);
+		if (ret)
+			goto error_xenbus;
+
+		sprintf(str, "event-channel-%u", i);
+		ret = xenbus_printf(xbt, dev->nodename, str, "%u", priv->rings[i].evtchn);
+		if (ret)
+			goto error_xenbus;
+	}
+	priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
+	if (IS_ERR(priv->tag)) {
+		ret = PTR_ERR(priv->tag);
+		goto error_xenbus;
+	}
+	ret = xenbus_transaction_end(xbt, 0);
+	if (ret) {
+		if (ret == -EAGAIN)
+			goto again;
+		xenbus_dev_fatal(dev, ret, "completing transaction");
+		goto error;
+	}
+
+	list_add_tail(&priv->list, &xen_9pfs_devs);
+	dev_set_drvdata(&dev->dev, priv);
+	xenbus_switch_state(dev, XenbusStateInitialised);
 	return 0;
+
+ error_xenbus:
+	xenbus_transaction_end(xbt, 1);
+	xenbus_dev_fatal(dev, ret, "writing xenstore");
+ error:
+	dev_set_drvdata(&dev->dev, NULL);
+	xen_9pfs_front_free(priv);
+	return ret;
 }
 
 static int xen_9pfs_front_resume(struct xenbus_device *dev)
 {
+	dev_warn(&dev->dev, "suspend/resume unsupported\n");
 	return 0;
 }
 
 static void xen_9pfs_front_changed(struct xenbus_device *dev,
 		enum xenbus_state backend_state)
 {
+	switch (backend_state) {
+	case XenbusStateReconfiguring:
+	case XenbusStateReconfigured:
+	case XenbusStateInitialising:
+	case XenbusStateInitialised:
+	case XenbusStateUnknown:
+		break;
+
+	case XenbusStateInitWait:
+		break;
+
+	case XenbusStateConnected:
+		xenbus_switch_state(dev, XenbusStateConnected);
+		break;
+
+	case XenbusStateClosed:
+		if (dev->state == XenbusStateClosed)
+			break;
+		/* Missed the backend's CLOSING state -- fallthrough */
+	case XenbusStateClosing:
+		xenbus_frontend_closed(dev);
+		break;
+	}
 }
 
 static struct xenbus_driver xen_9pfs_front_driver = {
-- 
1.9.1
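
A note on how the pieces above are expected to fit together: the sketch
below illustrates the use of the wait_queue and work_struct that
xen_9pfs_front_alloc_dataring() sets up, as described in the commit
message (p9_xen_request() and p9_xen_response() are still stubs in this
patch). This is only an illustration, not part of the patch: the
out_cons/out_prod indexes and XEN_9PFS_RING_SIZE are assumed to come
from the ring interface defined earlier in the series, and
xen_9pfs_out_space()/xen_9pfs_send() are hypothetical helper names.

/*
 * Illustrative sketch only -- not part of this patch. out_cons/out_prod
 * and XEN_9PFS_RING_SIZE are assumed from the transport header added
 * earlier in the series; the helpers below are hypothetical.
 */
static bool xen_9pfs_out_space(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	RING_IDX cons = ring->intf->out_cons;
	RING_IDX prod = ring->intf->out_prod;

	/* Read the indexes before looking at any ring data. */
	virt_mb();

	return XEN_9PFS_RING_SIZE - (prod - cons) >= size;
}

static int xen_9pfs_send(struct xen_9pfs_dataring *ring, RING_IDX size)
{
	int ret;

	/* Sleep until the backend has consumed enough of the out ring. */
	ret = wait_event_interruptible(ring->wq,
				       xen_9pfs_out_space(ring, size));
	if (ret)
		return ret;

	/* ... copy the 9p request into ring->ring.out and bump out_prod ... */

	/* Kick the backend via the event channel bound above. */
	notify_remote_via_irq(ring->irq);
	return 0;
}

The wake_up_interruptible() call in xen_9pfs_front_event_handler() is
what completes the wait above, while schedule_work() defers response
processing to p9_xen_response().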