We embed a pointer to the service tree in each queue, to handle multiple
service trees easily.
Service trees are enriched with a counter.
cfq_add_rq_rb is invoked after putting the rq in the fifo, to ensure
that all fields in rq are properly initialized.
Signed-off-by: Corrado Zoccolo <[email protected]>
---
block/cfq-iosched.c | 33 ++++++++++++++++++++++-----------
1 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 25a3f46..c59609c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -69,8 +69,9 @@ static DEFINE_SPINLOCK(ioc_gone_lock);
struct cfq_rb_root {
struct rb_root rb;
struct rb_node *left;
+ unsigned count;
};
-#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
+#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, }
/*
* Per process-grouping structure
@@ -115,6 +116,9 @@ struct cfq_queue {
unsigned short ioprio_class, org_ioprio_class;
pid_t pid;
+
+ struct cfq_rb_root *service_tree;
+ struct cfq_io_context *cic;
};
/*
@@ -254,6 +258,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
struct cfq_queue *cfqq, bool is_sync)
{
cic->cfqq[is_sync] = cfqq;
+ cfqq->cic = cic;
}
/*
@@ -482,6 +487,7 @@ static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
if (root->left == n)
root->left = NULL;
rb_erase_init(n, &root->rb);
+ --root->count;
}
/*
@@ -532,11 +538,12 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct rb_node **p, *parent;
struct cfq_queue *__cfqq;
unsigned long rb_key;
+ struct cfq_rb_root *service_tree = &cfqd->service_tree;
int left;
if (cfq_class_idle(cfqq)) {
rb_key = CFQ_IDLE_DELAY;
- parent = rb_last(&cfqd->service_tree.rb);
+ parent = rb_last(&service_tree->rb);
if (parent && parent != &cfqq->rb_node) {
__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
rb_key += __cfqq->rb_key;
@@ -554,7 +561,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqq->slice_resid = 0;
} else {
rb_key = -HZ;
- __cfqq = cfq_rb_first(&cfqd->service_tree);
+ __cfqq = cfq_rb_first(service_tree);
rb_key += __cfqq ? __cfqq->rb_key : jiffies;
}
@@ -565,12 +572,14 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (rb_key == cfqq->rb_key)
return;
- cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+ cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
+ cfqq->service_tree = NULL;
}
left = 1;
parent = NULL;
- p = &cfqd->service_tree.rb.rb_node;
+ cfqq->service_tree = service_tree;
+ p = &service_tree->rb.rb_node;
while (*p) {
struct rb_node **n;
@@ -602,11 +611,12 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
if (left)
- cfqd->service_tree.left = &cfqq->rb_node;
+ service_tree->left = &cfqq->rb_node;
cfqq->rb_key = rb_key;
rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
+ rb_insert_color(&cfqq->rb_node, &service_tree->rb);
+ service_tree->count++;
}
static struct cfq_queue *
@@ -709,8 +719,10 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
- if (!RB_EMPTY_NODE(&cfqq->rb_node))
- cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+ if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+ cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
+ cfqq->service_tree = NULL;
+ }
if (cfqq->p_root) {
rb_erase(&cfqq->p_node, cfqq->p_root);
cfqq->p_root = NULL;
@@ -2194,10 +2206,9 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, cfqq, "insert_request");
cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
- cfq_add_rq_rb(rq);
-
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
+ cfq_add_rq_rb(rq);
cfq_rq_enqueued(cfqd, cfqq, rq);
}
--
1.6.2.5
Corrado Zoccolo <[email protected]> writes:
> We embed a pointer to the service tree in each queue, to handle multiple
> service trees easily.
> Service trees are enriched with a counter.
> cfq_add_rq_rb is invoked after putting the rq in the fifo, to ensure
> that all fields in rq are properly initialized.
>
> @@ -115,6 +116,9 @@ struct cfq_queue {
> unsigned short ioprio_class, org_ioprio_class;
>
> pid_t pid;
> +
> + struct cfq_rb_root *service_tree;
> + struct cfq_io_context *cic;
> };
This change worries me. For async queues, there is not a 1:1 mapping
between cfqq and cic. With my forthcoming close cooperator changes, the
same will be true for sync queues.
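To make that concrete, here is a rough illustration (not a patch; cic_a, cic_b and async_cfqq are made-up names). Two tasks at the same priority share a single async cfqq, so the second cic_set_cfqq() call silently overwrites the back-pointer stored by the first:

    /* illustration only: both cics map to the same shared async cfqq */
    cic_set_cfqq(cic_a, async_cfqq, false); /* async_cfqq->cic = cic_a */
    cic_set_cfqq(cic_b, async_cfqq, false); /* async_cfqq->cic = cic_b, cic_a's link is gone */

So whatever you read back through cfqq->cic for an async queue may belong to a different process than the one you expect.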
Cheers,
Jeff
Hi Jeff,
On Wed, Oct 21, 2009 at 5:31 PM, Jeff Moyer <[email protected]> wrote:
> Corrado Zoccolo <[email protected]> writes:
>
>> We embed a pointer to the service tree in each queue, to handle multiple
>> service trees easily.
>> Service trees are enriched with a counter.
>> cfq_add_rq_rb is invoked after putting the rq in the fifo, to ensure
>> that all fields in rq are properly initialized.
>>
>> @@ -115,6 +116,9 @@ struct cfq_queue {
>> unsigned short ioprio_class, org_ioprio_class;
>>
>> pid_t pid;
>> +
>> + struct cfq_rb_root *service_tree;
>> + struct cfq_io_context *cic;
>> };
>
> This change worries me. For async queues, there is not a 1:1 mapping
> between cfqq and cic.
Currently, I need it only for sync queues. And it is used only to
determine if a queue is seeky, so if we can mark it in the queue
instead of the cic, this will not be needed any more.
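Roughly something like the sketch below (just a sketch, not tested; the flag name is made up, and it reuses the existing CFQ_CFQQ_FNS() helpers and the threshold CIC_SEEKY uses today):

    /* sketch only: keep the seekiness hint on the queue itself */

    /* add to enum cfqq_state_flags: */
    CFQ_CFQQ_FLAG_seeky,            /* hypothetical new per-queue flag */

    /* next to the other CFQ_CFQQ_FNS() users: */
    CFQ_CFQQ_FNS(seeky);

    /* wherever cic->seek_mean is updated: */
    if (cic->seek_mean > (8 * 1024))        /* same threshold as CIC_SEEKY */
            cfq_mark_cfqq_seeky(cfqq);
    else
            cfq_clear_cfqq_seeky(cfqq);

That way the hint lives with the queue, so it does not matter how many cics end up pointing at it.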
> With my forthcoming close cooperator changes, the
> same will be true for sync queues.
Yes, I saw it. This is currently just an RFC; if your changes get merged
first, I'll devise an alternative solution to this problem.
Corrado
> Cheers,
> Jeff
>
Corrado Zoccolo <[email protected]> writes:
> Hi Jeff,
> On Wed, Oct 21, 2009 at 5:31 PM, Jeff Moyer <[email protected]> wrote:
>> Corrado Zoccolo <[email protected]> writes:
>>
>>> We embed a pointer to the service tree in each queue, to handle multiple
>>> service trees easily.
>>> Service trees are enriched with a counter.
>>> cfq_add_rq_rb is invoked after putting the rq in the fifo, to ensure
>>> that all fields in rq are properly initialized.
>>>
>>> @@ -115,6 +116,9 @@ struct cfq_queue {
>>> unsigned short ioprio_class, org_ioprio_class;
>>>
>>> pid_t pid;
>>> +
>>> + struct cfq_rb_root *service_tree;
>>> + struct cfq_io_context *cic;
>>> };
>>
>> This change worries me. For async queues, there is not a 1:1 mapping
>> between cfqq and cic.
>
> Currently, I need it only for sync queues. And it is used only to
> determine if a queue is seeky, so if we can mark it in the queue
> instead of the cic, this will not be needed any more.
Ah ha! I have the same requirement for breaking the queues apart, so I
think this won't be an issue.
Thanks!
Jeff