It seems that read_cqe.done, i.e. nvmet_rdma_read_data_done(), can be
executed in softirq context, as completion (done) callbacks typically
are, and it acquires rsp_wr_wait_lock along the following call chain:
nvmet_rdma_read_data_done()
--> nvmet_rdma_release_rsp()
--> nvmet_rdma_process_wr_wait_list()
--> spin_lock(&queue->rsp_wr_wait_lock)
It therefore seems safer to take this lock with spin_lock_bh();
otherwise the following deadlocks are possible:
nvmet_rdma_queue_response()
--> nvmet_rdma_release_rsp()
--> nvmet_rdma_process_wr_wait_list()
--> spin_lock(&queue->rsp_wr_wait_lock)
<interrupt>
   --> nvmet_rdma_read_data_done()
   --> nvmet_rdma_release_rsp()
   --> nvmet_rdma_process_wr_wait_list()
   --> spin_lock(&queue->rsp_wr_wait_lock)
nvmet_rdma_cm_handler()
--> nvmet_rdma_handle_command()
--> spin_lock(&queue->rsp_wr_wait_lock)
<interrupt>
   --> nvmet_rdma_read_data_done()
   --> nvmet_rdma_release_rsp()
   --> nvmet_rdma_process_wr_wait_list()
   --> spin_lock(&queue->rsp_wr_wait_lock)
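
For illustration only (not part of the patch): a minimal sketch of the
locking pattern the change follows, assuming a lock shared between
process context and a softirq completion handler. The process-context
side must use the _bh variant so the softirq cannot preempt the lock
holder on the same CPU and spin on the held lock. demo_lock and the
demo_* functions are made-up names, not driver code.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* stands in for rsp_wr_wait_lock */

	/* Process context (e.g. the nvmet_rdma_handle_command() path). */
	static void demo_process_ctx(void)
	{
		spin_lock_bh(&demo_lock);	/* _bh: softirqs blocked on this CPU */
		/* ... manipulate the wait list ... */
		spin_unlock_bh(&demo_lock);
	}

	/* Softirq context (e.g. the read_cqe.done completion path). */
	static void demo_softirq_ctx(void)
	{
		spin_lock(&demo_lock);		/* already in BH context, plain lock is fine */
		/* ... manipulate the wait list ... */
		spin_unlock(&demo_lock);
	}
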
Signed-off-by: Chengfeng Ye <[email protected]>
---
drivers/nvme/target/rdma.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 4597bca43a6d..a01ed29fbd8a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -520,7 +520,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 
 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
 {
-	spin_lock(&queue->rsp_wr_wait_lock);
+	spin_lock_bh(&queue->rsp_wr_wait_lock);
 	while (!list_empty(&queue->rsp_wr_wait_list)) {
 		struct nvmet_rdma_rsp *rsp;
 		bool ret;
@@ -529,16 +529,16 @@ static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
 				struct nvmet_rdma_rsp, wait_list);
 		list_del(&rsp->wait_list);
 
-		spin_unlock(&queue->rsp_wr_wait_lock);
+		spin_unlock_bh(&queue->rsp_wr_wait_lock);
 		ret = nvmet_rdma_execute_command(rsp);
-		spin_lock(&queue->rsp_wr_wait_lock);
+		spin_lock_bh(&queue->rsp_wr_wait_lock);
 
 		if (!ret) {
 			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
 			break;
 		}
 	}
-	spin_unlock(&queue->rsp_wr_wait_lock);
+	spin_unlock_bh(&queue->rsp_wr_wait_lock);
 }
 
 static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
@@ -994,9 +994,9 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 		goto out_err;
 
 	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
-		spin_lock(&queue->rsp_wr_wait_lock);
+		spin_lock_bh(&queue->rsp_wr_wait_lock);
 		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
-		spin_unlock(&queue->rsp_wr_wait_lock);
+		spin_unlock_bh(&queue->rsp_wr_wait_lock);
 	}
 
 	return;
--
2.17.1