From: Bob Liu <bob.liu@oracle.com>
To: xen-devel@lists.xen.org
Cc: david.vrabel@citrix.com, linux-kernel@vger.kernel.org,
	roger.pau@citrix.com, konrad.wilk@oracle.com,
	felipe.franciosi@citrix.com, axboe@fb.com, hch@infradead.org,
	avanzini.arianna@gmail.com, Bob Liu <bob.liu@oracle.com>
Subject: [PATCH 08/10] xen/blkfront: negotiate hardware queue number with backend
Date: Sun, 15 Feb 2015 16:19:03 +0800
Message-Id: <1423988345-4005-9-git-send-email-bob.liu@oracle.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1423988345-4005-1-git-send-email-bob.liu@oracle.com>
References: <1423988345-4005-1-git-send-email-bob.liu@oracle.com>

The maximum number of hardware queues for xen/blkfront is num_online_cpus()
by default, or the value given through a module parameter; the number
supported by xen/blkback is advertised through xenstore
("multi-queue-max-queues"). The negotiated number is the smaller of the
two, and is written back to xen/blkback as "multi-queue-num-queues".

Signed-off-by: Bob Liu <bob.liu@oracle.com>
---
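Note for reviewers (not part of the commit): the negotiation boils down
to the sketch below. negotiate_nr_queues() is an illustrative helper,
not code from this patch; error handling and the xenbus transaction
around the write-back are omitted.

/*
 * Minimal sketch of the queue-count negotiation, assuming a simplified
 * context: the frontend limit is the max_queues module parameter
 * (default num_online_cpus()), the backend limit comes from xenstore.
 */
#include <linux/kernel.h>
#include <xen/xenbus.h>

static unsigned int negotiate_nr_queues(struct xenbus_device *dev,
					unsigned int frontend_max)
{
	unsigned int backend_max = 0;
	int err;

	/* The backend advertises its limit in its own xenstore directory. */
	err = xenbus_scanf(XBT_NIL, dev->otherend,
			   "multi-queue-max-queues", "%u", &backend_max);
	if (err < 0)
		backend_max = 1;	/* key absent: single-queue backend */

	/* The negotiated count is the smaller of the two limits. */
	return min(backend_max, frontend_max);
}

In the patch itself, blkfront_probe() stores this minimum in
info->nr_rings, and talk_to_blkback() writes it back for the backend as
"multi-queue-num-queues".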
 drivers/block/xen-blkfront.c | 72 ++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 67 insertions(+), 5 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index d551be0..32caf85 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -99,6 +99,10 @@ static unsigned int xen_blkif_max_segments = 32;
 module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
 MODULE_PARM_DESC(max, "Maximum amount of segments in indirect requests (default is 32)");
 
+static unsigned int xenblkif_max_queues;
+module_param_named(max_queues, xenblkif_max_queues, uint, 0644);
+MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues per virtual disk");
+
 #define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
@@ -677,7 +681,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 
 	memset(&info->tag_set, 0, sizeof(info->tag_set));
 	info->tag_set.ops = &blkfront_mq_ops;
-	info->tag_set.nr_hw_queues = 1;
+	info->tag_set.nr_hw_queues = info->nr_rings;
 	info->tag_set.queue_depth = BLK_RING_SIZE;
 	info->tag_set.numa_node = NUMA_NO_NODE;
 	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
@@ -1338,6 +1342,8 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	struct xenbus_transaction xbt;
 	int err, i;
 	struct blkfront_ring_info *rinfo;
+	char *path;
+	size_t pathsize;
 
 	for (i = 0; i < info->nr_rings; i++) {
 		rinfo = &info->rinfo[i];
@@ -1354,6 +1360,13 @@ again:
 		goto out;
 	}
 
+	/* Write the number of queues */
+	err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u", info->nr_rings);
+	if (err) {
+		message = "writing multi-queue-num-queues";
+		goto abort_transaction;
+	}
+
 	if (info->nr_rings == 1) {
 		rinfo = &info->rinfo[0];
 		err = xenbus_printf(xbt, dev->nodename,
@@ -1369,8 +1382,33 @@ again:
 			goto abort_transaction;
 		}
 	} else {
-		/* Not supported at this stage */
-		goto abort_transaction;
+		pathsize = strlen(dev->nodename) + 12;
+		path = kzalloc(pathsize, GFP_KERNEL);
+		if (!path) {
+			err = -ENOMEM;
+			message = "ENOMEM while writing ring references";
+			goto abort_transaction;
+		}
+		for (i = 0; i < info->nr_rings; i++) {
+			memset(path, 0, pathsize);
+			snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
+
+			err = xenbus_printf(xbt, path,
+					    "ring-ref", "%u", info->rinfo[i].ring_ref);
+			if (err) {
+				message = "writing ring-ref";
+				kfree(path);
+				goto abort_transaction;
+			}
+			err = xenbus_printf(xbt, path,
+					    "event-channel", "%u", info->rinfo[i].evtchn);
+			if (err) {
+				message = "writing event-channel";
+				kfree(path);
+				goto abort_transaction;
+			}
+		}
+		kfree(path);
 	}
 	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
 			    XEN_IO_PROTO_ABI_NATIVE);
@@ -1420,6 +1458,7 @@ static int blkfront_probe(struct xenbus_device *dev,
 	int err, vdevice, i, rindex;
 	struct blkfront_info *info;
 	struct blkfront_ring_info *rinfo;
+	unsigned int max_queues = 0;
 
 	/* FIXME: Use dynamic device id if this is not set. */
 	err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1473,7 +1512,14 @@ static int blkfront_probe(struct xenbus_device *dev,
 	info->vdevice = vdevice;
 	info->connected = BLKIF_STATE_DISCONNECTED;
 
-	info->nr_rings = 1;
+	/* Check if the backend supports multiple queues */
+	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+			   "multi-queue-max-queues", "%u", &max_queues);
+	if (err < 0)
+		max_queues = 1;
+
+	info->nr_rings = min(max_queues, xenblkif_max_queues);
+	pr_info("xen/blkfront probe info->nr_rings:%u, backend supports:%u\n", info->nr_rings, max_queues);
 	info->rinfo = kzalloc(sizeof(*rinfo) * info->nr_rings, GFP_KERNEL);
 	if (!info->rinfo) {
 		xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
@@ -1654,12 +1700,24 @@ static int blkif_recover(struct blkfront_info *info)
 static int blkfront_resume(struct xenbus_device *dev)
 {
 	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
-	int err;
+	int err = 0;
+	unsigned int max_queues = 0;
 
 	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
 	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
+	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+			    "multi-queue-max-queues", "%u", &max_queues, NULL);
+	if (err)
+		max_queues = 1;
+
+	if (info->nr_rings != min(max_queues, xenblkif_max_queues)) {
+		/* Resuming with a different number of hardware queues
+		 * is not supported at this stage. */
+		return -EOPNOTSUPP;
+	}
+
 	err = talk_to_blkback(dev, info);
 
 	/*
@@ -2165,6 +2223,10 @@ static int __init xlblk_init(void)
 		return -ENODEV;
 	}
 
+	/* Allow as many queues as there are CPUs, unless set by the user */
+	if (!xenblkif_max_queues)
+		xenblkif_max_queues = num_online_cpus();
+
 	ret = xenbus_register_frontend(&blkfront_driver);
 	if (ret) {
 		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
-- 
1.8.3.1