Simplify the calculation in cfq_prio_to_maxrq(), and replace CFQ_PRIO_LISTS
with IOPRIO_BE_NR since they are equal and IOPRIO_BE_NR looks more reasonable
in this context IMHO.
Signed-off-by: Namhyung Kim <[email protected]>
---
block/cfq-iosched.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ab7a9e6a9b1c..151a050e692c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2029,7 +2029,7 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
- return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
+ return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}
/*
--
1.7.5.2
Reduce the number of bit operations in cfq_choose_req() on average
(and worst) cases.
Signed-off-by: Namhyung Kim <[email protected]>
---
block/cfq-iosched.c | 14 +++++---------
1 files changed, 5 insertions(+), 9 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 151a050e692c..a79e62063144 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -665,15 +665,11 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
if (rq2 == NULL)
return rq1;
- if (rq_is_sync(rq1) && !rq_is_sync(rq2))
- return rq1;
- else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
- return rq2;
- if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
- return rq1;
- else if ((rq2->cmd_flags & REQ_META) &&
- !(rq1->cmd_flags & REQ_META))
- return rq2;
+ if (rq_is_sync(rq1) != rq_is_sync(rq2))
+ return rq_is_sync(rq1) ? rq1 : rq2;
+
+ if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
+ return rq1->cmd_flags & REQ_META ? rq1 : rq2;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
--
1.7.5.2
The 'group_changed' variable is initialized to 0 and never changed, so
checking the variable is meaningless.
It is a leftover from 0bbfeb832042 ("cfq-iosched: Always provide group
isolation."). Let's get rid of it.
Signed-off-by: Namhyung Kim <[email protected]>
Cc: Justin TerAvest <[email protected]>
---
block/cfq-iosched.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a79e62063144..4ceebd346710 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1206,7 +1206,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rb_root *service_tree;
int left;
int new_cfqq = 1;
- int group_changed = 0;
service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
cfqq_type(cfqq));
@@ -1277,7 +1276,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
rb_link_node(&cfqq->rb_node, parent, p);
rb_insert_color(&cfqq->rb_node, &service_tree->rb);
service_tree->count++;
- if ((add_front || !new_cfqq) && !group_changed)
+ if (add_front || !new_cfqq)
return;
cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
--
1.7.5.2
When struct cfq_data allocation fails, the cic_index needs to be freed.
Signed-off-by: Namhyung Kim <[email protected]>
---
block/cfq-iosched.c | 6 +++++-
1 files changed, 5 insertions(+), 1 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4ceebd346710..7fe732a274b2 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3881,8 +3881,12 @@ static void *cfq_init_queue(struct request_queue *q)
return NULL;
cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
- if (!cfqd)
+ if (!cfqd) {
+ spin_lock(&cic_index_lock);
+ ida_remove(&cic_index_ida, i);
+ spin_unlock(&cic_index_lock);
return NULL;
+ }
/*
* Don't need take queue_lock in the routine, since we are
--
1.7.5.2
On 2011-05-24 05:18, Namhyung Kim wrote:
> Simplify the calculation in cfq_prio_to_maxrq(), plus replace CFQ_PRIO_LISTS to
> IOPRIO_BE_NR since they are the same and IOPRIO_BE_NR looks more reasonable in
> this context IMHO.
Double checked your math, it is indeed identical to
2*base_rq*IOPRIO_BE_NR - 2*base_rq*cfqq->ioprio. Looks a lot cleaner,
thanks.
--
Jens Axboe
Looks good to me.
Reviewed-by: Justin TerAvest <[email protected]>
On Mon, May 23, 2011 at 8:18 PM, Namhyung Kim <[email protected]> wrote:
> The 'group_changed' variable is initialized to 0 and never changed, so
> checking the variable is meaningless.
>
> It is a leftover from 0bbfeb832042 ("cfq-iosched: Always provide group
> iosolation."). Let's get rid of it.
>
> Signed-off-by: Namhyung Kim <[email protected]>
> Cc: Justin TerAvest <[email protected]>
> ---
>  block/cfq-iosched.c |    3 +--
>  1 files changed, 1 insertions(+), 2 deletions(-)
>
> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
> index a79e62063144..4ceebd346710 100644
> --- a/block/cfq-iosched.c
> +++ b/block/cfq-iosched.c
> @@ -1206,7 +1206,6 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
> 	struct cfq_rb_root *service_tree;
> 	int left;
> 	int new_cfqq = 1;
> -	int group_changed = 0;
>
> 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
> 					cfqq_type(cfqq));
> @@ -1277,7 +1276,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
> 	rb_link_node(&cfqq->rb_node, parent, p);
> 	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
> 	service_tree->count++;
> -	if ((add_front || !new_cfqq) && !group_changed)
> +	if (add_front || !new_cfqq)
> 		return;
> 	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
> }
> --
> 1.7.5.2
>
>
Why make this change? Are we that sensitive to the number of operations?
It makes the code a bit harder to read, I think.
On Mon, May 23, 2011 at 8:18 PM, Namhyung Kim <[email protected]> wrote:
> Reduce the number of bit operations in cfq_choose_req() on average
> (and worst) cases.
>
> Signed-off-by: Namhyung Kim <[email protected]>
> ---
>  block/cfq-iosched.c |   14 +++++---------
>  1 files changed, 5 insertions(+), 9 deletions(-)
>
> diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
> index 151a050e692c..a79e62063144 100644
> --- a/block/cfq-iosched.c
> +++ b/block/cfq-iosched.c
> @@ -665,15 +665,11 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
> 	if (rq2 == NULL)
> 		return rq1;
>
> -	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
> -		return rq1;
> -	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
> -		return rq2;
> -	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
> -		return rq1;
> -	else if ((rq2->cmd_flags & REQ_META) &&
> -		 !(rq1->cmd_flags & REQ_META))
> -		return rq2;
> +	if (rq_is_sync(rq1) != rq_is_sync(rq2))
> +		return rq_is_sync(rq1) ? rq1 : rq2;
> +
> +	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_META)
> +		return rq1->cmd_flags & REQ_META ? rq1 : rq2;
>
> 	s1 = blk_rq_pos(rq1);
> 	s2 = blk_rq_pos(rq2);
> --
> 1.7.5.2
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
>
2011-05-24 (화), 10:34 -0700, Justin TerAvest:
> Why make this change? Are we that sensitive to the number of operations?
>
> It makes the code a bit harder to read, I think.
>
Hi Justin,
I thought that the function is on a sort of hot path, so it would be
better to make it even a bit faster, no? And if you guys think this
patch makes the code harder to read, we can add a comment, although I
don't think it's that hard to follow. :)
Thanks.
--
Regards,
Namhyung Kim