From: Benjamin Herrenschmidt
To: linux-nvme@lists.infradead.org
Cc: linux-kernel@vger.kernel.org, Jens Axboe, Keith Busch,
    Christoph Hellwig, Paul Pawlowski, Benjamin Herrenschmidt
Subject: [PATCH v2 1/3] nvme-pci: Pass the queue to SQ_SIZE/CQ_SIZE macros
Date: Wed, 17 Jul 2019 10:45:25 +1000
Message-Id: <20190717004527.30363-1-benh@kernel.crashing.org>
X-Mailer: git-send-email 2.17.1

This will make it easier to handle variable queue entry sizes later.
No functional change.
Signed-off-by: Benjamin Herrenschmidt
Reviewed-by: Christoph Hellwig
---
 drivers/nvme/host/pci.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index dd10cf78f2d3..8f006638452b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -28,8 +28,8 @@
 #include "trace.h"
 #include "nvme.h"
 
-#define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
-#define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
+#define SQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_command))
+#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
 
 #define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
@@ -1344,16 +1344,16 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
 {
-	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq->q_depth),
+	dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
 				(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
 	if (!nvmeq->sq_cmds)
 		return;
 
 	if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
 		pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
-				nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+				nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 	} else {
-		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq->q_depth),
+		dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
 				nvmeq->sq_cmds, nvmeq->sq_dma_addr);
 	}
 }
@@ -1433,12 +1433,12 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
 }
 
 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
-				int qid, int depth)
+				int qid)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 
 	if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+		nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
 		if (nvmeq->sq_cmds) {
 			nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
 							nvmeq->sq_cmds);
@@ -1447,11 +1447,11 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
 				return 0;
 			}
 
-			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(depth));
+			pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
 		}
 	}
 
-	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
 				&nvmeq->sq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->sq_cmds)
 		return -ENOMEM;
@@ -1465,12 +1465,13 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	if (dev->ctrl.queue_count > qid)
 		return 0;
 
-	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(depth),
+	nvmeq->q_depth = depth;
+	nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
 					 &nvmeq->cq_dma_addr, GFP_KERNEL);
 	if (!nvmeq->cqes)
 		goto free_nvmeq;
 
-	if (nvme_alloc_sq_cmds(dev, nvmeq, qid, depth))
+	if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
 		goto free_cqdma;
 
 	nvmeq->dev = dev;
@@ -1479,15 +1480,14 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	nvmeq->q_depth = depth;
 	nvmeq->qid = qid;
 	dev->ctrl.queue_count++;
 
 	return 0;
 
  free_cqdma:
-	dma_free_coherent(dev->dev, CQ_SIZE(depth), (void *)nvmeq->cqes,
-			  nvmeq->cq_dma_addr);
+	dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
+			  nvmeq->cq_dma_addr);
 free_nvmeq:
 	return -ENOMEM;
 }
@@ -1515,7 +1515,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
-	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
+	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
 	wmb(); /* ensure the first interrupt sees the initialization */
-- 
2.17.1
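
[Editorial aside, not part of the patch: the point of passing the queue
pointer instead of the raw depth is that the element size can later become
a per-queue property, so only the macro body has to change, not the fifteen
call sites touched above. The stand-alone user-space sketch below makes
that concrete; the struct definitions are stand-ins with illustrative
sizes, and the sqes field (log2 of the SQ element size) is an assumption
about where later patches in this series could take the macro, not
something this patch adds.]

#include <stdio.h>

/* Stand-ins for the kernel structures; sizes are illustrative only. */
struct nvme_command    { unsigned char bytes[64]; };
struct nvme_completion { unsigned char bytes[16]; };

struct nvme_queue {
	unsigned int q_depth;
	unsigned int sqes;	/* hypothetical: log2 of SQ element size */
};

/* Old form: the element size is baked into every call site. */
#define SQ_SIZE_OLD(depth)	((depth) * sizeof(struct nvme_command))

/* New form: the macro reads the queue, so a per-queue element size
 * (e.g. the hypothetical sqes field above) can be used without
 * changing any caller again. */
#define SQ_SIZE(q)	((q)->q_depth * (1UL << (q)->sqes))
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

int main(void)
{
	struct nvme_queue q = { .q_depth = 32, .sqes = 6 };	/* 64-byte SQEs */

	printf("old SQ bytes: %zu\n", SQ_SIZE_OLD(q.q_depth));
	printf("new SQ bytes: %lu, CQ bytes: %zu\n",
	       SQ_SIZE(&q), CQ_SIZE(&q));
	return 0;
}

[Both forms print 2048 SQ bytes here; the difference is only where the
element size lives, which is exactly what "no functional change" claims.]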