From: Shaohua Li
Subject: [PATCH 5/5] blk-mq: make plug work for multiple disks and queues
Date: Thu, 30 Apr 2015 10:45:18 -0700
Message-ID: <9cb12b99feb2886a21084bf3f93fab773c93c465.1430414610.git.shli@fb.com>
X-Mailer: git-send-email 1.8.1
X-Mailing-List: linux-kernel@vger.kernel.org

The previous patch makes plugging work for the multiple-queue case.
However, it only works for a single disk, because it assumes there is at
most one request in the plug list. If a task is accessing multiple
disks, e.g. through MD/DM, that assumption is wrong. Let
blk_attempt_plug_merge() record a request from the same queue, so
blk_mq_make_request() can use that request instead of the first entry in
the plug list.

Cc: Jens Axboe
Cc: Christoph Hellwig
Signed-off-by: Shaohua Li
---
A simplified, standalone sketch of the new same_queue_rq bookkeeping
follows the patch.

 block/blk-core.c | 10 +++++++---
 block/blk-mq.c   | 11 ++++++-----
 block/blk.h      |  3 ++-
 3 files changed, 15 insertions(+), 9 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index d51ed61..a5e1574 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1521,7 +1521,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                            unsigned int *request_count)
+                            unsigned int *request_count,
+                            struct request **same_queue_rq)
 {
         struct blk_plug *plug;
         struct request *rq;
@@ -1541,8 +1542,10 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
         list_for_each_entry_reverse(rq, plug_list, queuelist) {
                 int el_ret;

-                if (rq->q == q)
+                if (rq->q == q) {
                         (*request_count)++;
+                        *same_queue_rq = rq;
+                }

                 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
                         continue;
@@ -1583,6 +1586,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
         int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
         struct request *req;
         unsigned int request_count = 0;
+        struct request *same_queue_rq;

         /*
          * low level driver can indicate that it wants pages above a
@@ -1607,7 +1611,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
          * any locks.
          */
         if (!blk_queue_nomerges(q) &&
-            blk_attempt_plug_merge(q, bio, &request_count))
+            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                 return;

         spin_lock_irq(q->queue_lock);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5545828..c45739c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1269,6 +1269,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         struct request *rq;
         unsigned int request_count = 0;
         struct blk_plug *plug;
+        struct request *same_queue_rq = NULL;

         blk_queue_bounce(q, &bio);

@@ -1278,7 +1279,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
         }

         if (likely(!is_flush_fua) && !blk_queue_nomerges(q) &&
-            blk_attempt_plug_merge(q, bio, &request_count))
+            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                 return;

         rq = blk_mq_map_request(q, bio, &data);
@@ -1308,9 +1309,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
                  * issued. So the plug list will have one request at most
                  */
                 if (plug) {
-                        if (!list_empty(&plug->mq_list)) {
-                                old_rq = list_first_entry(&plug->mq_list,
-                                        struct request, queuelist);
+                        if (same_queue_rq) {
+                                old_rq = same_queue_rq;
                                 list_del_init(&old_rq->queuelist);
                         }
                         list_add_tail(&rq->queuelist, &plug->mq_list);
@@ -1350,6 +1350,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
         unsigned int request_count = 0;
         struct blk_map_ctx data;
         struct request *rq;
+        struct request *same_queue_rq;

         blk_queue_bounce(q, &bio);

@@ -1359,7 +1360,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
         }

         if (likely(!is_flush_fua) && !blk_queue_nomerges(q) &&
-            blk_attempt_plug_merge(q, bio, &request_count))
+            blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
                 return;

         rq = blk_mq_map_request(q, bio, &data);
diff --git a/block/blk.h b/block/blk.h
index 43b0361..aa8633c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -78,7 +78,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                             struct bio *bio);
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-                            unsigned int *request_count);
+                            unsigned int *request_count,
+                            struct request **same_queue_rq);
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
--
1.8.1
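For readers who want to experiment with the plug-list bookkeeping outside
of a kernel tree, here is a minimal, self-contained userspace sketch of
the idea: walk the plug list, count the requests that belong to the queue
being submitted to, and remember one of them. The struct definitions,
scan_plug_list() and the tag field below are illustrative stand-ins, not
kernel APIs; in the kernel the walk happens in blk_attempt_plug_merge(),
and the recorded request (same_queue_rq) is what blk_mq_make_request()
issues instead of blindly taking the first entry of plug->mq_list.

/*
 * Simplified userspace model (NOT kernel code) of the bookkeeping added by
 * this patch: while walking a task's plug list, count the requests that
 * belong to the queue being submitted to and remember one of them, so a
 * request for another disk is never picked by mistake.
 * All types and names here are illustrative stand-ins.
 */
#include <stdio.h>

struct queue {
        int id;                         /* stands in for struct request_queue */
};

struct request {
        struct queue *q;                /* queue this request belongs to */
        struct request *next;           /* stands in for the plug-list linkage */
        int tag;
};

/*
 * Walk the plug list, count requests on @q and record the last one seen,
 * mirroring what blk_attempt_plug_merge() does after this patch.
 */
static unsigned int scan_plug_list(struct request *plug_list, struct queue *q,
                                   struct request **same_queue_rq)
{
        unsigned int request_count = 0;
        struct request *rq;

        *same_queue_rq = NULL;
        for (rq = plug_list; rq; rq = rq->next) {
                if (rq->q == q) {
                        request_count++;
                        *same_queue_rq = rq;    /* remember a same-queue request */
                }
        }
        return request_count;
}

int main(void)
{
        struct queue qa = { .id = 0 }, qb = { .id = 1 };
        /* a plug list holding requests for two different disks/queues */
        struct request r3 = { .q = &qb, .next = NULL, .tag = 3 };
        struct request r2 = { .q = &qa, .next = &r3,  .tag = 2 };
        struct request r1 = { .q = &qb, .next = &r2,  .tag = 1 };
        struct request *same_queue_rq;
        unsigned int count;

        count = scan_plug_list(&r1, &qa, &same_queue_rq);
        printf("requests on queue A: %u, recorded tag: %d\n",
               count, same_queue_rq ? same_queue_rq->tag : -1);
        return 0;
}

Built with any C99 compiler, this prints one request counted for queue A
with tag 2 recorded, while the requests for queue B stay on the modeled
plug list, which mirrors the property the patch relies on for MD/DM
workloads spanning multiple disks.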