2009-01-06 06:26:28

by Divyesh Shah

Subject: [PATCH] Allow RT requests to pre-empt ongoing BE timeslice in CFQ.

This patch adds the ability to pre-empt an ongoing BE timeslice when an RT
request is waiting for the current timeslice to complete. This reduces the
wait time before an RT request reaches the disk from an upper bound of 4
requests (the current value of cfq_quantum) to 1 request.

Signed-off-by: Divyesh Shah <[email protected]>
---
Patch based on 2.6.28 linus tree.

Test Results:
Latency (secs) for the RT task when doing sequential reads from a 10G file:

                        | only RT | RT + BE | RT + BE + this patch
small (512 byte) reads  |     143 |     163 |                  146
large (1 MB) reads      |     142 |     158 |                  146
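
For reference, the decision the hunks below add boils down to a small
predicate over the I/O priority classes of the active queue and the first
queue on the service tree. The following is a standalone userspace sketch of
that predicate only, not kernel code; the enum, struct and function names
here are simplified stand-ins for struct cfq_queue, IOPRIO_CLASS_* and the
cfq_* helpers referenced in the diff:

/* Standalone illustration only -- not kernel code. */
#include <stdbool.h>
#include <stdio.h>

enum prio_class { CLASS_RT, CLASS_BE, CLASS_IDLE };

struct queue {
	enum prio_class ioprio_class;
	bool slice_used;	/* has this queue used up its timeslice? */
};

/* Preempt the active queue when the first waiting queue is RT and the
 * active queue is BE with slice time left (mirrors the cfq_select_queue
 * hunk below). */
static bool rt_should_preempt(const struct queue *active,
			      const struct queue *first_waiting)
{
	return first_waiting &&
	       first_waiting->ioprio_class == CLASS_RT &&
	       active->ioprio_class == CLASS_BE &&
	       !active->slice_used;
}

int main(void)
{
	struct queue be_active = { CLASS_BE, false };
	struct queue rt_waiting = { CLASS_RT, false };
	struct queue be_waiting = { CLASS_BE, false };

	printf("RT waiting behind BE: preempt = %d\n",
	       rt_should_preempt(&be_active, &rt_waiting));	/* 1 */
	printf("BE waiting behind BE: preempt = %d\n",
	       rt_should_preempt(&be_active, &be_waiting));	/* 0 */
	return 0;
}

Without this preemption, the waiting RT queue is only selected once the
active BE queue finishes its slice or its dispatch round, which is where the
upper bound of cfq_quantum (currently 4) BE requests mentioned above comes
from.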

block/cfq-iosched.c | 38 ++++++++++++++++++++++++++++++++++++--
1 files changed, 36 insertions(+), 2 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a062ee..d0ded09 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -54,6 +54,7 @@ static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC (0)
@@ -992,13 +993,29 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
*/
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
- struct cfq_queue *cfqq;
+ struct cfq_queue *cfqq, *first_cfqq;

cfqq = cfqd->active_queue;
if (!cfqq)
goto new_queue;

/*
+ * If we have an RT cfqq waiting, then we pre-empt the current BE cfqq
+ * provided it has not already used up its timeslice.
+ */
+ first_cfqq = cfq_rb_first(&cfqd->service_tree);
+ if (first_cfqq && cfq_class_rt(first_cfqq) &&
+ cfq_class_be(cfqq) && !cfq_slice_used(cfqq)) {
+ /*
+ * We simulate this as if cfqq timed out so that it gets to bank
+ * the remainder of its time slice.
+ */
+ cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
+ goto new_queue;
+ }
+
+ /*
* The active queue has run out of time, expire it and select new.
*/
if (cfq_slice_used(cfqq))
@@ -1044,6 +1061,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,

do {
struct request *rq;
+ struct cfq_queue *first_cfqq;

/*
* follow expired path, else get first next available
@@ -1067,6 +1085,15 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (RB_EMPTY_ROOT(&cfqq->sort_list))
break;

+ /*
+ * If there is a non-empty RT cfqq waiting for the current
+ * cfqq's timeslice to complete, pre-empt this cfqq
+ */
+ first_cfqq = cfq_rb_first(&cfqd->service_tree);
+ if (cfq_class_be(cfqq) && first_cfqq &&
+ cfq_class_rt(first_cfqq))
+ break;
+
} while (dispatched < max_dispatch);

/*
@@ -1797,6 +1824,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
if (rq_is_meta(rq) && !cfqq->meta_pending)
return 1;

+ /*
+ * Allow an RT request to pre-empt an ongoing BE cfqq timeslice.
+ */
+ if (cfq_class_rt(new_cfqq) && cfq_class_be(cfqq))
+ return 1;
+
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return 0;

@@ -1866,7 +1899,8 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* not the active queue - expire current slice if it is
* idle and has expired it's mean thinktime or this new queue
- * has some old slice time left and is of higher priority
+ * has some old slice time left and is of higher priority or
+ * this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
cfq_mark_cfqq_must_dispatch(cfqq);


2009-01-06 09:00:55

by Jens Axboe

Subject: Re: [PATCH] Allow RT requests to pre-empt ongoing BE timeslice in CFQ.

On Mon, Jan 05 2009, Divyesh Shah wrote:
> This patch adds the ability to pre-empt an ongoing BE timeslice when an RT
> request is waiting for the current timeslice to complete. This reduces the
> wait time before an RT request reaches the disk from an upper bound of 4
> requests (the current value of cfq_quantum) to 1 request.
>
> Signed-off-by: Divyesh Shah <[email protected]>
> ---
> Patch based on 2.6.28 linus tree.
>
> Test Results:
> Latency (secs) for the RT task when doing sequential reads from a 10G file:
>
>                         | only RT | RT + BE | RT + BE + this patch
> small (512 byte) reads  |     143 |     163 |                  146
> large (1 MB) reads      |     142 |     158 |                  146

Results look good. I'm not too keen on doing the rb lookup, though; how
about we just track pending RT queues instead? Add a counter to cfqd
(busy_rt_queues) and inc/dec it along with busy_queues, then check that
in the two locations? If that value is non-zero, an RT queue would be
returned by cfq_rb_first() as well.

Also, the places that test cfq_class_be() seem more appropriate as
!cfq_class_rt() checks, as that also covers idle queues.
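
To make that last point concrete, here is a short userspace illustration
(again with simplified stand-ins for the IOPRIO_CLASS_* values, not kernel
code): a cfq_class_be()-style test leaves an idle-class active queue running
in front of a waiting RT queue, while a !cfq_class_rt()-style test preempts
it as well.

#include <stdbool.h>
#include <stdio.h>

enum prio_class { CLASS_RT, CLASS_BE, CLASS_IDLE };

/* Check from the original patch: only a BE active queue is preemptible. */
static bool preemptible_be_only(enum prio_class active)
{
	return active == CLASS_BE;
}

/* Check suggested here: anything that is not RT is preemptible,
 * which also covers idle-class queues. */
static bool preemptible_not_rt(enum prio_class active)
{
	return active != CLASS_RT;
}

int main(void)
{
	printf("idle-class active, BE-only check: %d\n",
	       preemptible_be_only(CLASS_IDLE));	/* 0: idle queue keeps running */
	printf("idle-class active, !RT check:     %d\n",
	       preemptible_not_rt(CLASS_IDLE));		/* 1: idle queue is preempted */
	return 0;
}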

How does the below look to you? It's untested here, but it compiles. I'd
appreciate your comments and testing!

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e8525fa..8640199 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -84,6 +84,7 @@ struct cfq_data {
*/
struct cfq_rb_root service_tree;
unsigned int busy_queues;
+ unsigned int busy_rt_queues;

int rq_in_driver;
int sync_flight;
@@ -562,6 +563,8 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues++;

cfq_resort_rr_list(cfqd, cfqq);
}
@@ -581,6 +584,8 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)

BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues--;
}

/*
@@ -1005,6 +1010,20 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
goto expire;

/*
+ * If we have RT queues waiting, then we preempt the current non-rt
+ * cfqq.
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+ /*
+ * We simulate this as if cfqq timed out so that it gets to bank
+ * the remainder of its time slice.
+ */
+ cfq_log_cfqq(cfqd, cfqq, "preempt from RT");
+ cfq_slice_expired(cfqd, 1);
+ goto new_queue;
+ }
+
+ /*
* The active queue has requests and isn't expired, allow it to
* dispatch.
*/
@@ -1067,6 +1086,12 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (RB_EMPTY_ROOT(&cfqq->sort_list))
break;

+ /*
+ * If we have pending rt queues, stop dispatching
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
+ break;
+
} while (dispatched < max_dispatch);

/*
@@ -1801,6 +1826,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
if (rq_is_meta(rq) && !cfqq->meta_pending)
return 1;

+ /*
+ * Allow an RT request to pre-empt an ongoing BE cfqq timeslice.
+ */
+ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+ return 1;
+
if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return 0;


--
Jens Axboe