We use different service trees for different priority classes.
This allows a simplification of the service tree insertion code, which no
longer has to consider priority while walking the tree.
Signed-off-by: Corrado Zoccolo <[email protected]>
---
block/cfq-iosched.c | 114 +++++++++++++++++++++++++++++++++++---------------
1 files changed, 80 insertions(+), 34 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c59609c..23de38e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -122,15 +122,31 @@ struct cfq_queue {
};
/*
+ * Index in the service_trees.
+ * IDLE is handled separately, so it has a negative index
+ */
+enum wl_prio_t {
+ IDLE_WORKLOAD = -1,
+ BE_WORKLOAD = 0,
+ RT_WORKLOAD = 1
+};
+
+/*
* Per block device queue structure
*/
struct cfq_data {
struct request_queue *queue;
/*
- * rr list of queues with requests and the count of them
+ * rr lists of queues with requests, one rr for each priority class.
+ * Counts are embedded in the cfq_rb_root
+ */
+ struct cfq_rb_root service_trees[2];
+ struct cfq_rb_root service_tree_idle;
+ /*
+ * The priority currently being served
*/
- struct cfq_rb_root service_tree;
+ enum wl_prio_t serving_prio;
/*
* Each priority tree is sorted by next_request position. These
@@ -140,7 +156,6 @@ struct cfq_data {
struct rb_root prio_trees[CFQ_PRIO_LISTS];
unsigned int busy_queues;
- unsigned int busy_rt_queues;
unsigned int busy_queues_avg[2];
int rq_in_driver[2];
@@ -193,6 +208,15 @@ struct cfq_data {
unsigned long last_end_sync_rq;
};
+static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
+ struct cfq_data *cfqd)
+{
+ if (prio == IDLE_WORKLOAD)
+ return &cfqd->service_tree_idle;
+
+ return &cfqd->service_trees[prio];
+}
+
enum cfqq_state_flags {
CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
@@ -237,6 +261,23 @@ CFQ_CFQQ_FNS(coop);
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
+{
+ if (cfq_class_idle(cfqq))
+ return IDLE_WORKLOAD;
+ if (cfq_class_rt(cfqq))
+ return RT_WORKLOAD;
+ return BE_WORKLOAD;
+}
+
+static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
+{
+ if (wl == IDLE_WORKLOAD)
+ return cfqd->service_tree_idle.count;
+
+ return cfqd->service_trees[wl].count;
+}
+
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
struct io_context *, gfp_t);
@@ -321,10 +362,7 @@ cfq_get_avg_queues(struct cfq_data *cfqd, bool rt) {
unsigned min_q, max_q;
unsigned mult = cfq_hist_divisor - 1;
unsigned round = cfq_hist_divisor / 2;
- unsigned busy = cfqd->busy_rt_queues;
-
- if (!rt)
- busy = cfqd->busy_queues - cfqd->busy_rt_queues;
+ unsigned busy = cfq_busy_queues_wl(rt, cfqd);
min_q = min(cfqd->busy_queues_avg[rt], busy);
max_q = max(cfqd->busy_queues_avg[rt], busy);
@@ -528,7 +566,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
}
/*
- * The cfqd->service_tree holds all pending cfq_queue's that have
+ * The cfqd->service_trees hold all pending cfq_queue's that have
* requests waiting to be processed. It is sorted in the order that
* we will service the queues.
*/
@@ -538,9 +576,10 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
unsigned long rb_key;
- struct cfq_rb_root *service_tree = &cfqd->service_tree;
+ struct cfq_rb_root *service_tree;
int left;
+ service_tree = service_tree_for(cfqq_prio(cfqq), cfqd);
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
parent = rb_last(&service_tree->rb);
@@ -569,7 +608,8 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
/*
* same position, nothing more to do
*/
- if (rb_key == cfqq->rb_key)
+ if (rb_key == cfqq->rb_key &&
+ cfqq->service_tree == service_tree)
return;
cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
@@ -587,25 +627,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
/*
- * sort RT queues first, we always want to give
- * preference to them. IDLE queues goes to the back.
- * after that, sort on the next service time.
+ * sort by key, that represents service time.
*/
- if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
- n = &(*p)->rb_left;
- else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
- n = &(*p)->rb_right;
- else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
- n = &(*p)->rb_left;
- else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
- n = &(*p)->rb_right;
- else if (time_before(rb_key, __cfqq->rb_key))
+ if (time_before(rb_key, __cfqq->rb_key))
n = &(*p)->rb_left;
- else
+ else {
n = &(*p)->rb_right;
-
- if (n == &(*p)->rb_right)
left = 0;
+ }
p = n;
}
@@ -704,8 +733,7 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
- if (cfq_class_rt(cfqq))
- cfqd->busy_rt_queues++;
+
cfq_resort_rr_list(cfqd, cfqq);
}
@@ -730,8 +758,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
- if (cfq_class_rt(cfqq))
- cfqd->busy_rt_queues--;
}
/*
@@ -985,10 +1011,12 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
*/
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
- if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
- return NULL;
+ struct cfq_rb_root *service_tree =
+ service_tree_for(cfqd->serving_prio, cfqd);
- return cfq_rb_first(&cfqd->service_tree);
+ if (RB_EMPTY_ROOT(&service_tree->rb))
+ return NULL;
+ return cfq_rb_first(service_tree);
}
/*
@@ -1106,6 +1134,10 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
if (cfq_cfqq_coop(cfqq))
return NULL;
+ /* we don't want to mix processes with different characteristics */
+ if (cfqq->service_tree != cur_cfqq->service_tree)
+ return NULL;
+
if (!probe)
cfq_mark_cfqq_coop(cfqq);
return cfqq;
@@ -1272,6 +1304,14 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
expire:
cfq_slice_expired(cfqd, 0);
new_queue:
+ if (!new_cfqq) {
+ if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
+ cfqd->serving_prio = RT_WORKLOAD;
+ else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
+ cfqd->serving_prio = BE_WORKLOAD;
+ else
+ cfqd->serving_prio = IDLE_WORKLOAD;
+ }
cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
return cfqq;
@@ -1298,8 +1338,12 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq;
int dispatched = 0;
+ int i;
+ for (i = 0; i < 2; ++i)
+ while ((cfqq = cfq_rb_first(&cfqd->service_trees[i])) != NULL)
+ dispatched += __cfq_forced_dispatch_cfqq(cfqq);
- while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+ while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
dispatched += __cfq_forced_dispatch_cfqq(cfqq);
cfq_slice_expired(cfqd, 0);
@@ -2546,7 +2590,9 @@ static void *cfq_init_queue(struct request_queue *q)
if (!cfqd)
return NULL;
- cfqd->service_tree = CFQ_RB_ROOT;
+ for (i = 0; i < 2; ++i)
+ cfqd->service_trees[i] = CFQ_RB_ROOT;
+ cfqd->service_tree_idle = CFQ_RB_ROOT;
/*
* Not strictly needed (since RB_ROOT just clears the node and we
--
1.6.2.5
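Reduced to a standalone, compilable sketch (with cfq_rb_root stubbed out; the
real type wraps an rbtree and a count), the indexing scheme of the patch looks
like this:

#include <assert.h>

/* stub standing in for the kernel's rbtree-based service tree */
struct cfq_rb_root {
	int count;
};

/* as in the patch: BE and RT index service_trees[], IDLE is negative
 * because it lives in a tree of its own */
enum wl_prio_t {
	IDLE_WORKLOAD = -1,
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1
};

struct cfq_data {
	struct cfq_rb_root service_trees[2];
	struct cfq_rb_root service_tree_idle;
};

/* same body as service_tree_for() in the patch */
static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
					    struct cfq_data *cfqd)
{
	if (prio == IDLE_WORKLOAD)
		return &cfqd->service_tree_idle;

	return &cfqd->service_trees[prio];
}

int main(void)
{
	struct cfq_data cfqd = { 0 };

	assert(service_tree_for(RT_WORKLOAD, &cfqd) == &cfqd.service_trees[RT_WORKLOAD]);
	assert(service_tree_for(BE_WORKLOAD, &cfqd) == &cfqd.service_trees[BE_WORKLOAD]);
	assert(service_tree_for(IDLE_WORKLOAD, &cfqd) == &cfqd.service_tree_idle);
	return 0;
}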
Corrado Zoccolo <[email protected]> writes:
> We use different service trees for different priority classes.
> This allows a simplification of the service tree insertion code, which no
> longer has to consider priority while walking the tree.
This is kind of funny, considering things used to be divvied up into
lists by class and priority.
> + * Index in the service_trees.
> + * IDLE is handled separately, so it has a negative index
> + */
> +enum wl_prio_t {
> + IDLE_WORKLOAD = -1,
> + BE_WORKLOAD = 0,
> + RT_WORKLOAD = 1
> +};
What's wrong with IOPRIO_CLASS_(RT|BE|IDLE)? Why invent another enum?
> +
> +/*
> * Per block device queue structure
> */
> struct cfq_data {
[...]
> + struct cfq_rb_root service_trees[2];
> + struct cfq_rb_root service_tree_idle;
Why separate out the idle service tree from the others?
> +static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
> + struct cfq_data *cfqd)
> +{
> + if (prio == IDLE_WORKLOAD)
> + return &cfqd->service_tree_idle;
> +
> + return &cfqd->service_trees[prio];
> +}
This should just turn into cfqd->service_trees[IOPRIO_CLASS_*] in the
callers.
[...]
> /*
> @@ -1106,6 +1134,10 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
> if (cfq_cfqq_coop(cfqq))
> return NULL;
>
> + /* we don't want to mix processes with different characteristics */
> + if (cfqq->service_tree != cur_cfqq->service_tree)
> + return NULL;
> +
Hmm, that looks like a current bug in the close cooperator code. It
shouldn't allow cooperation between differing scheduling classes.
Cheers,
Jeff
On Wed, Oct 21, 2009 at 5:54 PM, Jeff Moyer <[email protected]> wrote:
> Corrado Zoccolo <[email protected]> writes:
>
>> We use different service trees for different priority classes.
>> This allows a simplification of the service tree insertion code, which no
>> longer has to consider priority while walking the tree.
>
> This is kind of funny, considering things used to be divvied up into
> lists by class and priority.
>
>> + * Index in the service_trees.
>> + * IDLE is handled separately, so it has a negative index
>> + */
>> +enum wl_prio_t {
>> + IDLE_WORKLOAD = -1,
>> + BE_WORKLOAD = 0,
>> + RT_WORKLOAD = 1
>> +};
>
> What's wrong with IOPRIO_CLASS_(RT|BE|IDLE)? Why invent another enum?
Because I want to index into my internal structures, and I have no
control over the values of the existing ones.
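For reference, the existing classes are defined in include/linux/ioprio.h
as NONE=0, RT=1, BE=2, IDLE=3, so they neither start at 0 for the two
classes that live in the array nor keep IDLE out of range. A minimal
sketch of the mismatch:

#include <stdio.h>

/* values as defined in include/linux/ioprio.h */
enum {
	IOPRIO_CLASS_NONE = 0,
	IOPRIO_CLASS_RT = 1,
	IOPRIO_CLASS_BE = 2,
	IOPRIO_CLASS_IDLE = 3
};

/* the compact indices introduced by the patch */
enum wl_prio_t {
	IDLE_WORKLOAD = -1,
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1
};

int main(void)
{
	/* indexing a two-entry service_trees[] with the ioprio values
	 * would waste a slot on NONE and still leave IDLE (== 3) out
	 * of range; the compact enum maps BE/RT to 0/1 and moves IDLE
	 * out of the array entirely */
	printf("ioprio:  RT=%d BE=%d IDLE=%d\n",
	       IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE);
	printf("compact: RT=%d BE=%d IDLE=%d\n",
	       RT_WORKLOAD, BE_WORKLOAD, IDLE_WORKLOAD);
	return 0;
}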
>
>> +
>> +/*
>> * Per block device queue structure
>> */
>> struct cfq_data {
> [...]
>> + struct cfq_rb_root service_trees[2];
>> + struct cfq_rb_root service_tree_idle;
>
> Why separate out the idle service tree from the others?
>
In a subsequent patch, I will transform the first [2] into a [2][3],
while the idle tree will remain unchanged.
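A sketch of that layout (the wl_type_t names here are illustrative only,
borrowed from the SYNC/SYNC_NOIDLE/ASYNC split mentioned below, not taken
from a posted patch):

#include <stdio.h>

struct cfq_rb_root {
	int count;	/* stub for the kernel's rbtree-based type */
};

enum wl_prio_t {
	IDLE_WORKLOAD = -1,
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1
};

/* hypothetical second dimension */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfq_data {
	struct cfq_rb_root service_trees[2][3];	/* [prio][type] */
	struct cfq_rb_root service_tree_idle;	/* still out of band */
};

int main(void)
{
	struct cfq_data cfqd = { 0 };

	/* lookup becomes two-dimensional for the RT/BE classes... */
	struct cfq_rb_root *st = &cfqd.service_trees[RT_WORKLOAD][SYNC_WORKLOAD];
	/* ...while idle keeps its single separate tree */
	struct cfq_rb_root *idle = &cfqd.service_tree_idle;

	printf("per-class trees: %zu, plus the idle tree\n",
	       sizeof(cfqd.service_trees) / sizeof(struct cfq_rb_root));
	printf("RT/SYNC count=%d, idle count=%d\n", st->count, idle->count);
	return 0;
}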
>> +static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
>> + struct cfq_data *cfqd)
>> +{
>> + if (prio == IDLE_WORKLOAD)
>> + return &cfqd->service_tree_idle;
>> +
>> + return &cfqd->service_trees[prio];
>> +}
>
> This should just turn into cfqd->service_trees[IOPRIO_CLASS_*] in the
> callers.
I need the special treatment for idle in the next patches, so I chose
this approach.
>
> [...]
>
>> /*
>> @@ -1106,6 +1134,10 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
>> if (cfq_cfqq_coop(cfqq))
>> return NULL;
>>
>> + /* we don't want to mix processes with different characteristics */
>> + if (cfqq->service_tree != cur_cfqq->service_tree)
>> + return NULL;
>> +
>
> Hmm, that looks like a current bug in the close cooperator code. It
> shouldn't allow cooperation between differing scheduling classes.
Maybe. Anyway, this check has more impact for me when the service
trees are subsequently split into SYNC, SYNC_NOIDLE, and ASYNC. I
don't want close cooperation between those either.
Corrado
>
> Cheers,
> Jeff
>
Corrado Zoccolo <[email protected]> writes:
> On Wed, Oct 21, 2009 at 5:54 PM, Jeff Moyer <[email protected]> wrote:
>> Corrado Zoccolo <[email protected]> writes:
>>> + * Index in the service_trees.
>>> + * IDLE is handled separately, so it has a negative index
>>> + */
>>> +enum wl_prio_t {
>>> + IDLE_WORKLOAD = -1,
>>> + BE_WORKLOAD = 0,
>>> + RT_WORKLOAD = 1
>>> +};
>>
>> What's wrong with IOPRIO_CLASS_(RT|BE|IDLE)? Why invent another enum?
> Because I want to index into my internal structures, and I have no
> control over the values of the existing ones.
Well, I already know and understand IOPRIO*, and it seems like it maps
exactly to what you're doing. I'll leave it up to Jens, though; this is
a minor detail.
Cheers,
Jeff