From: Benjamin Herrenschmidt
To: linux-nvme@lists.infradead.org
Cc: linux-kernel@vger.kernel.org, Jens Axboe, Keith Busch,
	Christoph Hellwig, Paul Pawlowski, Benjamin Herrenschmidt
Subject: [PATCH 1/3] nvme: Pass the queue to SQ_SIZE/CQ_SIZE macros
Date: Tue, 16 Jul 2019 10:46:47 +1000
Message-Id: <20190716004649.17799-1-benh@kernel.crashing.org>
X-Mailer: git-send-email 2.17.1

This will make it easier to handle variable queue entry sizes
later. No functional change.
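To make the motivation concrete: once the macros take the queue itself,
the entry size can later become a per-queue property rather than a
compile-time constant. A minimal sketch of where this could go, assuming
a hypothetical per-queue log2-entry-size field (called "sqes" here; this
patch does not add it):

	/*
	 * Hypothetical follow-up, not part of this patch: "sqes" is an
	 * assumed log2(entry size) field on struct nvme_queue, so the
	 * queue holds q_depth entries of (1 << sqes) bytes each.
	 */
	#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)

With the depth-based macros, such a change would have forced every call
site to pass both the depth and the entry size; with the queue-based
macros the call sites stay untouched.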
Signed-off-by: Benjamin Herrenschmidt
---
 drivers/nvme/host/pci.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dd10cf78f2d3..8f006638452b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)	(depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)	(depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	if (!nvmeq->sq_cmds)
 		return;
 
 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 	} else {
-		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
 				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-				int qid, int depth)
+				int qid)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
 		if (nvmeq->sq_cmds) {
 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
 							nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 			return 0;
 		}
 
-			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 		}
 	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
 				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->q_depth = depth;
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					 &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
 		goto free_cqdma;
 
 	nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	dev->ctrl.queue_count++;
 
 	return 0;
 
  free_cqdma:
-	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-							nvmeq->cq_dma_addr);
+	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+			  nvmeq->cq_dma_addr);
  free_nvmeq:
 	return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
 	wmb(); /* ensure the first interrupt sees the initialization */
-- 
2.17.1