2014-01-28 08:40:39

by Alexander Gordeev

Subject: [PATCH 00/14] NVMe: Cleanup device initialization

Hi Keith, Matthew,

Here are a few fixes to the code that caught my eye.

These are unrelated to the patch "nvme: Use pci_enable_msi_range()
and pci_enable_msix_range()" I sent earlier, which is still
awaiting your review.

Thanks!

Alexander Gordeev (14):
NVMe: Fix setup of affinity hint for unallocated queues
NVMe: Cleanup nvme_alloc_queue() and nvme_free_queue()
NVMe: Cleanup nvme_create_queue() and nvme_disable_queue()
NVMe: Cleanup adapter_alloc_cq/sq() and adapter_delete_cq/sq()
NVMe: Get rid of superfluous qid parameter to nvme_init_queue()
NVMe: Get rid of superfluous dev parameter to queue_request_irq()
NVMe: Make return value consistent across all functions
NVMe: nvme_dev_map() is a bad place to set admin queue IRQ number
NVMe: Access interrupt vectors using nvme_queue::cq_vector only
NVMe: Factor out nvme_set_queue_count()
NVMe: Factor out nvme_init_bar()
NVMe: Factor out nvme_init_interrupts()
NVMe: Factor out nvme_setup_interrupts()
NVMe: Rework "NVMe: Disable admin queue on init failure" commit

drivers/block/nvme-core.c | 205 +++++++++++++++++++++++++++++----------------
1 files changed, 133 insertions(+), 72 deletions(-)

--
1.7.7.6


2014-01-28 08:37:53

by Alexander Gordeev

Subject: [PATCH 01/14] NVMe: Fix setup of affinity hint for unallocated queues

IRQ affinity hints are set up for queues, some or all of which
may not have been allocated yet, either on device probe or on
resume. This update moves the setup until after all queues have
been created successfully.
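
For illustration, here is a minimal standalone sketch of the resulting
ordering (plain C with made-up names such as create_queue() and
set_affinity_hint(); not the actual driver code):

#include <stdio.h>

#define NR_IO_QUEUES	4
#define NR_CPUS		2

static int create_queue(int qid)
{
	if (qid == 3)		/* pretend the last queue fails to come up */
		return -1;
	printf("created queue %d\n", qid);
	return 0;
}

static void set_affinity_hint(int qid, int cpu)
{
	printf("queue %d -> cpu %d\n", qid, cpu);
}

int main(void)
{
	int qid, cpu = 0, created = 0;

	/* Step 1: create the queues and remember how many really exist. */
	for (qid = 0; qid < NR_IO_QUEUES; qid++) {
		if (create_queue(qid))
			break;
		created++;
	}

	/* Step 2: only now spread affinity hints over the queues that exist. */
	for (qid = 0; qid < created; qid++) {
		set_affinity_hint(qid, cpu);
		cpu = (cpu + 1) % NR_CPUS;
	}

	return 0;
}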

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b59a93a..3dfb0d4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1956,12 +1956,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
spin_unlock(&dev_list_lock);

- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_io_queues; i++) {
- irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
- cpu = cpumask_next(cpu, cpu_online_mask);
- }
-
q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
NVME_Q_DEPTH);
for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
@@ -1986,6 +1980,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
}

+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
return 0;

free_queues:
--
1.7.7.6

2014-01-28 08:37:59

by Alexander Gordeev

Subject: [PATCH 07/14] NVMe: Make return value consistent across all functions

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index f03f123..e004c09 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1315,7 +1315,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq)
nvme_init_queue(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);

- return result;
+ return 0;

release_sq:
adapter_delete_sq(nvmeq);
@@ -1431,7 +1431,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
- return result;
+ return 0;
}

struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
@@ -2414,7 +2414,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result && result != -EBUSY)
goto disable;

- return result;
+ return 0;

disable:
nvme_disable_queue(dev->queues[0]);
--
1.7.7.6

2014-01-28 08:38:10

by Alexander Gordeev

Subject: [PATCH 12/14] NVMe: Factor out nvme_init_interrupts()

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 22 ++++++++++++++++------
1 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1e30c42..39868be 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1909,13 +1909,10 @@ static int nvme_init_bar(struct nvme_dev *dev, int nr_io_queues)
return nr_io_queues;
}

-static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+static int nvme_init_interrupts(struct nvme_dev *dev, int nr_io_queues)
{
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, vecs, q_depth;
-
- /* Deregister the admin queue's interrupt */
- free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);
+ int result, i, vecs;

vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1945,13 +1942,26 @@ static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
}
}

+ return vecs;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+{
+ int result, cpu, i, q_depth;
+
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);
+
/*
* Should investigate if there's a performance win from allocating
* more queues than interrupt vectors; it might allow the submission
* path to scale better, even if the receive path is limited by the
* number of interrupts.
+ *
+ * The admin queue's interrupt number is changed in case of
+ * successful MSI-X or MSI allocation.
*/
- nr_io_queues = vecs;
+ nr_io_queues = nvme_init_interrupts(dev, nr_io_queues);

result = queue_request_irq(dev->queues[0], "nvme admin");
if (result) {
--
1.7.7.6

2014-01-28 08:38:08

by Alexander Gordeev

Subject: [PATCH 14/14] NVMe: Rework "NVMe: Disable admin queue on init failure" commit

This update partially reverts commit c5dc919 ("NVMe: Disable
admin queue on init failure") to restore the easily readable
nvme_free_queues() function. The functionality of commit c5dc919,
freeing the admin queue IRQ on init failure, is preserved.

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 11 ++++++-----
1 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 83d57b3..a51f342 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1161,11 +1161,11 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
dev->queues[qid] = NULL;
}

-static void nvme_free_queues(struct nvme_dev *dev, int lowest)
+static void nvme_free_queues(struct nvme_dev *dev)
{
int i;

- for (i = dev->queue_count - 1; i >= lowest; i--)
+ for (i = dev->queue_count - 1; i >= 0; i--)
nvme_free_queue(dev->queues[i]);
}

@@ -2035,7 +2035,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
return 0;

free_queues:
- nvme_free_queues(dev, 1);
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_free_queue(dev->queues[i]);
return result;
}

@@ -2601,7 +2602,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev, 0);
+ nvme_free_queues(dev);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2625,7 +2626,7 @@ static void nvme_remove(struct pci_dev *pdev)
misc_deregister(&dev->miscdev);
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
- nvme_free_queues(dev, 0);
+ nvme_free_queues(dev);
nvme_release_instance(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
--
1.7.7.6

2014-01-28 08:38:07

by Alexander Gordeev

Subject: [PATCH 13/14] NVMe: Factor out nvme_setup_interrupts()

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 33 ++++++++++++++++++++++++++-------
1 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 39868be..83d57b3 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1945,12 +1945,18 @@ static int nvme_init_interrupts(struct nvme_dev *dev, int nr_io_queues)
return vecs;
}

-static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+static int nvme_setup_interrupts(struct nvme_dev *dev, int nr_io_queues)
{
- int result, cpu, i, q_depth;
+ struct nvme_queue *adminq = dev->queues[0];
+ int result;

- /* Deregister the admin queue's interrupt */
- free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);
+ /*
+ * Deregister the admin queue's interrupt, since it is about
+ * to move to other IRQ number. We do not re-configure the
+ * admin queue - are there any adverse effects of this trick?
+ * Should we call nvme_clear_queue() to mimic nvme_disable_queue()?
+ */
+ free_irq(dev->entry[adminq->cq_vector].vector, adminq);

/*
* Should investigate if there's a performance win from allocating
@@ -1963,12 +1969,19 @@ static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
*/
nr_io_queues = nvme_init_interrupts(dev, nr_io_queues);

- result = queue_request_irq(dev->queues[0], "nvme admin");
+ result = queue_request_irq(adminq, "nvme admin");
if (result) {
- dev->queues[0]->q_suspended = 1;
- goto free_queues;
+ adminq->q_suspended = 1;
+ return result;
}

+ return nr_io_queues;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+{
+ int result, cpu, i, q_depth;
+
/* Free previously allocated queues that are no longer usable */
spin_lock(&dev_list_lock);
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
@@ -2442,6 +2455,11 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result < 0)
goto disable;

+ result = nvme_setup_interrupts(dev, result);
+ if (result < 0)
+ /* Admin queue interrupt has been torn down - can not go on */
+ goto delete;
+
result = nvme_setup_io_queues(dev, result);
if (result)
goto disable;
@@ -2453,6 +2471,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return -EBUSY;

nvme_disable_queue(dev->queues[0]);
+ delete:
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
--
1.7.7.6

2014-01-28 08:37:57

by Alexander Gordeev

Subject: [PATCH 09/14] NVMe: Access interrupt vectors using nvme_queue::cq_vector only

Minimize the possibility of error when accessing dev->entry[]
array entries by always using the legitimate nvme_queue::cq_vector
value rather than an arbitrary index. This update also makes the
affinity hint setup conform to the surrounding code.
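
A minimal standalone sketch of the indexing rule (toy types and
made-up vector numbers; not the actual driver code):

#include <stdio.h>

struct msi_entry { int vector; };
struct queue     { int cq_vector; };

int main(void)
{
	/*
	 * Pretend vectors were handed out in an order that does not match
	 * the queue numbering (e.g. the admin queue moved to another one).
	 * Indexing entry[] by the loop counter would then pick the wrong
	 * interrupt; indexing by cq_vector always picks the right one.
	 */
	struct msi_entry entry[3]  = { { 40 }, { 41 }, { 42 } };
	struct queue     queues[3] = { { 2 }, { 0 }, { 1 } };
	int i;

	for (i = 0; i < 3; i++)
		printf("queue %d uses irq %d\n",
		       i, entry[queues[i].cq_vector].vector);
	return 0;
}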

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 17 ++++++++++++-----
1 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 98d4b51..1821091 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1424,7 +1424,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;

- dev->entry[0].vector = dev->pci_dev->irq;
+ dev->entry[nvmeq->cq_vector].vector = dev->pci_dev->irq;
result = queue_request_irq(nvmeq, "nvme admin");
if (result)
return result;
@@ -1908,7 +1908,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}

/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);

vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1989,9 +1989,16 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
}

- cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < nr_io_queues; i++) {
- irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = nr_cpu_ids;
+ for (i = 1; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ get_cpu_mask(cpu));
+
cpu = cpumask_next(cpu, cpu_online_mask);
}

--
1.7.7.6

2014-01-28 08:37:52

by Alexander Gordeev

Subject: [PATCH 03/14] NVMe: Cleanup nvme_create_queue() and nvme_disable_queue()

A queue structure contains both a pointer to the device it
belongs to and the queue ID - there is no need to drag these
parameters around.
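
A minimal standalone sketch of the calling convention this and the
following cleanups move towards (made-up stand-in types; not the
actual driver code):

#include <stdio.h>

struct dev;			/* opaque stand-in for struct nvme_dev */

struct queue {
	struct dev *dev;	/* back-pointer to the owning device */
	int qid;		/* index into the device's queues[] array */
};

/* Before: disable_queue(dev, qid); after: the queue pointer is enough. */
static void disable_queue(struct queue *q)
{
	printf("disabling queue %d of its own device\n", q->qid);
}

int main(void)
{
	struct queue q = { .dev = NULL, .qid = 1 };

	disable_queue(&q);
	return 0;
}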

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 20 +++++++++++---------
1 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 60c6c05..606a2e1 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1197,9 +1197,10 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
spin_unlock_irq(&nvmeq->q_lock);
}

-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+static void nvme_disable_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
+ struct nvme_dev *dev = nvmeq->dev;
+ int qid = nvmeq->qid;

if (!nvmeq)
return;
@@ -1288,9 +1289,10 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->q_suspended = 0;
}

-static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+static int nvme_create_queue(struct nvme_queue *nvmeq)
{
struct nvme_dev *dev = nvmeq->dev;
+ int qid = nvmeq->qid;
int result;

result = adapter_alloc_cq(dev, qid, nvmeq);
@@ -1974,10 +1976,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}

for (i = 1; i < dev->queue_count; i++) {
- result = nvme_create_queue(dev->queues[i], i);
+ result = nvme_create_queue(dev->queues[i]);
if (result) {
for (--i; i > 0; i--)
- nvme_disable_queue(dev, i);
+ nvme_disable_queue(dev->queues[i]);
goto free_queues;
}
}
@@ -2140,7 +2142,7 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
set_current_state(TASK_RUNNING);

nvme_disable_ctrl(dev, readq(&dev->bar->cap));
- nvme_disable_queue(dev, 0);
+ nvme_disable_queue(dev->queues[0]);

send_sig(SIGKILL, dq->worker->task, 1);
flush_kthread_worker(dq->worker);
@@ -2236,7 +2238,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
dev_err(&dev->pci_dev->dev,
"Failed to create queue del task\n");
for (i = dev->queue_count - 1; i > 0; i--)
- nvme_disable_queue(dev, i);
+ nvme_disable_queue(dev->queues[i]);
return;
}

@@ -2276,7 +2278,7 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
} else {
nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
- nvme_disable_queue(dev, 0);
+ nvme_disable_queue(dev->queues[0]);
}
nvme_dev_unmap(dev);
}
@@ -2411,7 +2413,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;

disable:
- nvme_disable_queue(dev, 0);
+ nvme_disable_queue(dev->queues[0]);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
--
1.7.7.6

2014-01-28 08:37:51

by Alexander Gordeev

Subject: [PATCH 02/14] NVMe: Cleanup nvme_alloc_queue() and nvme_free_queue()

While nvme_alloc_queue() and nvme_free_queue() are logically
counterparts, they are inconsistent with regard to how device
queue_count is updated - it is increased within the former,
but decreased outside of the latter. This update fixes the
inconsistency and also makes further improvements to the
functions' code.
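
A minimal standalone sketch of the symmetry being restored (made-up
names; not the actual driver code); whatever the allocation path
registers and counts, the free path unregisters and uncounts:

#include <stdio.h>
#include <stdlib.h>

#define MAX_QUEUES 4

struct dev {
	void *queues[MAX_QUEUES];
	int queue_count;
};

static void *alloc_queue(struct dev *dev, int qid)
{
	void *q = malloc(16);

	if (!q)
		return NULL;
	dev->queues[qid] = q;		/* register ...              */
	dev->queue_count++;		/* ... and count, here       */
	return q;
}

static void free_queue(struct dev *dev, int qid)
{
	free(dev->queues[qid]);
	dev->queues[qid] = NULL;	/* unregister ...            */
	dev->queue_count--;		/* ... and uncount, here too */
}

int main(void)
{
	struct dev dev = { { NULL }, 0 };

	alloc_queue(&dev, 0);
	free_queue(&dev, 0);
	printf("queue_count = %d\n", dev.queue_count);	/* back to 0 */
	return 0;
}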

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 3dfb0d4..60c6c05 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1137,6 +1137,9 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)

static void nvme_free_queue(struct nvme_queue *nvmeq)
{
+ struct nvme_dev *dev = nvmeq->dev;
+ int qid = nvmeq->qid;
+
spin_lock_irq(&nvmeq->q_lock);
while (bio_list_peek(&nvmeq->sq_cong)) {
struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
@@ -1149,17 +1152,17 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
nvmeq->sq_cmds, nvmeq->sq_dma_addr);
kfree(nvmeq);
+
+ dev->queue_count--;
+ dev->queues[qid] = NULL;
}

static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;

- for (i = dev->queue_count - 1; i >= lowest; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--)
nvme_free_queue(dev->queues[i]);
- dev->queue_count--;
- dev->queues[i] = NULL;
- }
}

/**
@@ -1245,6 +1248,8 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->cq_vector = vector;
nvmeq->qid = qid;
nvmeq->q_suspended = 1;
+
+ dev->queues[qid] = nvmeq;
dev->queue_count++;

return nvmeq;
@@ -1394,7 +1399,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
if (!nvmeq)
return -ENOMEM;
- dev->queues[0] = nvmeq;
}

aqa = nvmeq->q_depth - 1;
@@ -1951,8 +1955,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
spin_unlock_irq(&nvmeq->q_lock);

nvme_free_queue(nvmeq);
- dev->queue_count--;
- dev->queues[i] = NULL;
}
spin_unlock(&dev_list_lock);

@@ -2439,8 +2441,6 @@ static void nvme_remove_disks(struct work_struct *ws)
for (i = dev->queue_count - 1; i > 0; i--) {
BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
nvme_free_queue(dev->queues[i]);
- dev->queue_count--;
- dev->queues[i] = NULL;
}
spin_unlock(&dev_list_lock);
}
--
1.7.7.6

2014-01-28 08:37:49

by Alexander Gordeev

Subject: [PATCH 04/14] NVMe: Cleanup adapter_alloc_cq/sq() and adapter_delete_cq/sq()

A queue structure contains both a pointer to the device it
belongs to and the queue ID - there is no need to drag these
parameters around.

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 34 +++++++++++++++++++---------------
1 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 606a2e1..652f0f6 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -942,8 +942,10 @@ int nvme_submit_admin_cmd_async(struct nvme_dev *dev, struct nvme_command *cmd,
return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo, ADMIN_TIMEOUT);
}

-static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+static int adapter_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
+ struct nvme_dev *dev = nvmeq->dev;
+ u16 id = nvmeq->qid;
int status;
struct nvme_command c;

@@ -957,9 +959,10 @@ static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
return 0;
}

-static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
- struct nvme_queue *nvmeq)
+static int adapter_alloc_cq(struct nvme_queue *nvmeq)
{
+ struct nvme_dev *dev = nvmeq->dev;
+ u16 qid = nvmeq->qid;
int status;
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
@@ -978,9 +981,10 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
return 0;
}

-static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
- struct nvme_queue *nvmeq)
+static int adapter_alloc_sq(struct nvme_queue *nvmeq)
{
+ struct nvme_dev *dev = nvmeq->dev;
+ u16 qid = nvmeq->qid;
int status;
struct nvme_command c;
int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
@@ -999,14 +1003,14 @@ static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
return 0;
}

-static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+static int adapter_delete_cq(struct nvme_queue *nvmeq)
{
- return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+ return adapter_delete_queue(nvmeq, nvme_admin_delete_cq);
}

-static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+static int adapter_delete_sq(struct nvme_queue *nvmeq)
{
- return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+ return adapter_delete_queue(nvmeq, nvme_admin_delete_sq);
}

int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
@@ -1210,8 +1214,8 @@ static void nvme_disable_queue(struct nvme_queue *nvmeq)
/* Don't tell the adapter to delete the admin queue.
* Don't tell a removed adapter to delete IO queues. */
if (qid && readl(&dev->bar->csts) != -1) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
+ adapter_delete_sq(nvmeq);
+ adapter_delete_cq(nvmeq);
}
nvme_clear_queue(nvmeq);
}
@@ -1295,11 +1299,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq)
int qid = nvmeq->qid;
int result;

- result = adapter_alloc_cq(dev, qid, nvmeq);
+ result = adapter_alloc_cq(nvmeq);
if (result < 0)
return result;

- result = adapter_alloc_sq(dev, qid, nvmeq);
+ result = adapter_alloc_sq(nvmeq);
if (result < 0)
goto release_cq;

@@ -1314,9 +1318,9 @@ static int nvme_create_queue(struct nvme_queue *nvmeq)
return result;

release_sq:
- adapter_delete_sq(dev, qid);
+ adapter_delete_sq(nvmeq);
release_cq:
- adapter_delete_cq(dev, qid);
+ adapter_delete_cq(nvmeq);
return result;
}

--
1.7.7.6

2014-01-28 08:41:10

by Alexander Gordeev

Subject: [PATCH 06/14] NVMe: Get rid of superfluous dev parameter to queue_request_irq()

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 12 ++++++------
1 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 661277d..f03f123 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1267,9 +1267,10 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
return NULL;
}

-static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- const char *name)
+static int queue_request_irq(struct nvme_queue *nvmeq, const char *name)
{
+ struct nvme_dev *dev = nvmeq->dev;
+
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
nvme_irq_check, nvme_irq, IRQF_SHARED,
@@ -1296,7 +1297,6 @@ static void nvme_init_queue(struct nvme_queue *nvmeq)

static int nvme_create_queue(struct nvme_queue *nvmeq)
{
- struct nvme_dev *dev = nvmeq->dev;
int result;

result = adapter_alloc_cq(nvmeq);
@@ -1307,7 +1307,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq)
if (result < 0)
goto release_cq;

- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(nvmeq, "nvme");
if (result < 0)
goto release_sq;

@@ -1424,7 +1424,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;

- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(nvmeq, "nvme admin");
if (result)
return result;

@@ -1945,7 +1945,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;

- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev->queues[0], "nvme admin");
if (result) {
dev->queues[0]->q_suspended = 1;
goto free_queues;
--
1.7.7.6

2014-01-28 08:37:48

by Alexander Gordeev

Subject: [PATCH 05/14] NVMe: Get rid of superfluous qid parameter to nvme_init_queue()

The qid parameter is not only superfluous, but also confusing:
it suggests nvme_init_queue() could reinitialize the queue with an
ID other than its index into the device's queues[] array.

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 8 ++++----
1 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 652f0f6..661277d 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1278,9 +1278,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
IRQF_SHARED, name, nvmeq);
}

-static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
+static void nvme_init_queue(struct nvme_queue *nvmeq)
{
struct nvme_dev *dev = nvmeq->dev;
+ u16 qid = nvmeq->qid;
unsigned extra = nvme_queue_extra(nvmeq->q_depth);

nvmeq->sq_tail = 0;
@@ -1296,7 +1297,6 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
static int nvme_create_queue(struct nvme_queue *nvmeq)
{
struct nvme_dev *dev = nvmeq->dev;
- int qid = nvmeq->qid;
int result;

result = adapter_alloc_cq(nvmeq);
@@ -1312,7 +1312,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq)
goto release_sq;

spin_lock_irq(&nvmeq->q_lock);
- nvme_init_queue(nvmeq, qid);
+ nvme_init_queue(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);

return result;
@@ -1429,7 +1429,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
return result;

spin_lock_irq(&nvmeq->q_lock);
- nvme_init_queue(nvmeq, 0);
+ nvme_init_queue(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
return result;
}
--
1.7.7.6

2014-01-28 08:59:25

by Alexander Gordeev

Subject: [PATCH 11/14] NVMe: Factor out nvme_init_bar()

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 16 ++++++++++++++--
1 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index d81b4c4..1e30c42 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1886,10 +1886,10 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

-static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+static int nvme_init_bar(struct nvme_dev *dev, int nr_io_queues)
{
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, vecs, size, q_depth;
+ int size;

size = db_bar_size(dev, nr_io_queues);
if (size > 8192) {
@@ -1906,6 +1906,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
dev->queues[0]->q_db = dev->dbs;
}

+ return nr_io_queues;
+}
+
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
+{
+ struct pci_dev *pdev = dev->pci_dev;
+ int result, cpu, i, vecs, q_depth;
+
/* Deregister the admin queue's interrupt */
free_irq(dev->entry[dev->queues[0]->cq_vector].vector, dev->queues[0]);

@@ -2420,6 +2428,10 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result < 0)
goto disable;

+ result = nvme_init_bar(dev, result);
+ if (result < 0)
+ goto disable;
+
result = nvme_setup_io_queues(dev, result);
if (result)
goto disable;
--
1.7.7.6

2014-01-28 09:26:53

by Alexander Gordeev

Subject: [PATCH 08/14] NVMe: nvme_dev_map() is a bad place to set admin queue IRQ number

Initializing the admin queue interrupt number within
nvme_dev_map() is confusing. Keep the nvme_dev_map() and
nvme_dev_unmap() counterparts simple and let
nvme_configure_admin_queue() deal with the admin queue in full.

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index e004c09..98d4b51 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1424,6 +1424,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;

+ dev->entry[0].vector = dev->pci_dev->irq;
result = queue_request_irq(nvmeq, "nvme admin");
if (result)
return result;
@@ -2077,7 +2078,6 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (pci_enable_device_mem(pdev))
return result;

- dev->entry[0].vector = pdev->irq;
pci_set_master(pdev);
bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_request_selected_regions(pdev, bars, "nvme"))
--
1.7.7.6

2014-01-28 09:43:58

by Alexander Gordeev

Subject: [PATCH 10/14] NVMe: Factor out nvme_set_queue_count()

Function nvme_setup_io_queues() is quite big. Make it more
readable by moving out code with clearly distinguishable
functionality. This update is the first in a series.
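
A minimal standalone sketch of the intended clamp semantics (made-up
names; not the actual driver code); a negative result from the
controller query is passed through as an error, otherwise the smaller
of the requested and supported counts wins:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for the controller query; pretend the device supports 8 queues. */
static int query_supported_queues(int requested)
{
	(void)requested;
	return 8;
}

static int set_queue_count(int requested)
{
	int supported = query_supported_queues(requested);

	if (supported < 0)
		return supported;	/* propagate the error code as-is */
	return min(supported, requested);
}

int main(void)
{
	printf("%d\n", set_queue_count(16));	/* 8: limited by the device  */
	printf("%d\n", set_queue_count(4));	/* 4: limited by the request */
	return 0;
}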

Signed-off-by: Alexander Gordeev <[email protected]>
---
drivers/block/nvme-core.c | 28 +++++++++++++++++-----------
1 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1821091..d81b4c4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1875,22 +1875,21 @@ static int set_queue_count(struct nvme_dev *dev, int count)
return min(result & 0xffff, result >> 16) + 1;
}

+static int nvme_set_queue_count(struct nvme_dev *dev, int count)
+{
+ int result = set_queue_count(dev, count);
+ return min(result, count);
+}
+
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}

-static int nvme_setup_io_queues(struct nvme_dev *dev)
+static int nvme_setup_io_queues(struct nvme_dev *dev, int nr_io_queues)
{
struct pci_dev *pdev = dev->pci_dev;
- int result, cpu, i, vecs, nr_io_queues, size, q_depth;
-
- nr_io_queues = num_online_cpus();
- result = set_queue_count(dev, nr_io_queues);
- if (result < 0)
- return result;
- if (result < nr_io_queues)
- nr_io_queues = result;
+ int result, cpu, i, vecs, size, q_depth;

size = db_bar_size(dev, nr_io_queues);
if (size > 8192) {
@@ -2417,13 +2416,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
list_add(&dev->node, &dev_list);
spin_unlock(&dev_list_lock);

- result = nvme_setup_io_queues(dev);
- if (result && result != -EBUSY)
+ result = nvme_set_queue_count(dev, num_online_cpus());
+ if (result < 0)
+ goto disable;
+
+ result = nvme_setup_io_queues(dev, result);
+ if (result)
goto disable;

return 0;

disable:
+ if (result == -EBUSY)
+ return -EBUSY;
+
nvme_disable_queue(dev->queues[0]);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
--
1.7.7.6

2014-02-18 16:52:20

by Alexander Gordeev

Subject: Re: [PATCH 00/14] NVMe: Cleanup device initialization

On Tue, Jan 28, 2014 at 09:38:46AM +0100, Alexander Gordeev wrote:
> Here are a few fixes to the code that caught my eye.

Hi Keith, Matthew,

Any feedback?

Thanks!

--
Regards,
Alexander Gordeev
[email protected]