2007-11-21 01:23:16

by Steven Rostedt

Subject: [PATCH v4 19/20] Optimize out cpu_clears

This patch removes several cpumask operations by keeping track
of the first of the CPUs that is of the lowest priority. When
the search for the lowest priority runqueue is completed, all
the bits up to the first CPU with the lowest priority runqueue
are cleared.

Signed-off-by: Steven Rostedt <[email protected]>

---
kernel/sched_rt.c | 35 +++++++++++++++++++++++++----------
1 file changed, 25 insertions(+), 10 deletions(-)

Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c 2007-11-20 19:53:15.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c 2007-11-20 19:53:17.000000000 -0500
@@ -293,22 +293,20 @@ static struct task_struct *pick_next_hig
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
- int cpu;
- cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
int lowest_prio = -1;
+ int lowest_cpu = 0;
int count = 0;
+ int cpu;

- cpus_clear(*lowest_mask);
- cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
+ cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);

/*
* Scan each rq for the lowest prio.
*/
- for_each_cpu_mask(cpu, *valid_mask) {
+ for_each_cpu_mask(cpu, *lowest_mask) {
struct rq *rq = cpu_rq(cpu);

/* We look for lowest RT prio or non-rt CPU */
@@ -325,13 +323,30 @@ static int find_lowest_cpus(struct task_
if (rq->rt.highest_prio > lowest_prio) {
/* new low - clear old data */
lowest_prio = rq->rt.highest_prio;
- if (count) {
- cpus_clear(*lowest_mask);
- count = 0;
- }
+ lowest_cpu = cpu;
+ count = 0;
}
cpu_set(rq->cpu, *lowest_mask);
count++;
+ } else
+ cpu_clear(cpu, *lowest_mask);
+ }
+
+ /*
+ * Clear out all the set bits that represent
+ * runqueues that were of higher prio than
+ * the lowest_prio.
+ */
+ if (lowest_cpu) {
+ /*
+ * Perhaps we could add another cpumask op to
+ * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+ * Then that could be optimized to use memset and such.
+ */
+ for_each_cpu_mask(cpu, *lowest_mask) {
+ if (cpu >= lowest_cpu)
+ break;
+ cpu_clear(cpu, *lowest_mask);
}
}
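
For the record, the cpumask op hinted at in the comment above could look
roughly like the sketch below; the name cpu_zero_bits() and the
word-at-a-time loop are only an illustration, not part of this patch:

	/*
	 * Rough sketch only: clear bits [0, nrbits) of a cpumask in
	 * whole words instead of one cpu_clear() per bit.  A real
	 * implementation would live in linux/cpumask.h and could use
	 * memset() for the full words.
	 */
	static inline void cpu_zero_bits(cpumask_t *mask, int nrbits)
	{
		int full = nrbits / BITS_PER_LONG;
		int rem  = nrbits % BITS_PER_LONG;
		int i;

		for (i = 0; i < full; i++)
			mask->bits[i] = 0UL;
		if (rem)
			mask->bits[full] &= ~((1UL << rem) - 1UL);
	}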


--


2007-11-21 02:11:57

by Steven Rostedt

Subject: Re: [PATCH v4 19/20] Optimize out cpu_clears

On Tue, Nov 20, 2007 at 08:01:13PM -0500, Steven Rostedt wrote:
>
> static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
> -static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);
>
> static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
> {
> - int cpu;
> - cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
> int lowest_prio = -1;
> + int lowest_cpu = 0;
> int count = 0;
> + int cpu;
>
> - cpus_clear(*lowest_mask);
> - cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
> + cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);
>
> /*
> * Scan each rq for the lowest prio.
> */
> - for_each_cpu_mask(cpu, *valid_mask) {
> + for_each_cpu_mask(cpu, *lowest_mask) {
> struct rq *rq = cpu_rq(cpu);
>
> /* We look for lowest RT prio or non-rt CPU */
> @@ -325,13 +323,30 @@ static int find_lowest_cpus(struct task_
> if (rq->rt.highest_prio > lowest_prio) {
> /* new low - clear old data */
> lowest_prio = rq->rt.highest_prio;
> - if (count) {
> - cpus_clear(*lowest_mask);
> - count = 0;
> - }

Gregory Haskins pointed out to me that this logic is slightly wrong. I
originally wrote this patch before adding his "count" patch optimization.
I did not take into account that on finding a non-RT queue, we may leave
some extra bits set because the cpus_clear() is not performed if count is
zero. And count gets set to zero here, which means that we don't clean
up.

The fix is to check for lowest_cpu > 0 instead of count on finding a
non-RT runqueue. This lets us know that we need to clear the mask.
Otherwise, if lowest_cpu == 0, then we can return the mask untouched.
The proper bit would already be set, and the return of 1 will have
the rest of the algorithm use the first bit.

Below is the updated patch. The full series is at:

http://rostedt.homelinux.com/rt/rt-balance-patches-v5.tar.bz2


> + lowest_cpu = cpu;
> + count = 0;
> }
> cpu_set(rq->cpu, *lowest_mask);
> count++;



From: Steven Rostedt <[email protected]>

This patch removes several cpumask operations by keeping track
of the first of the CPUs that is of the lowest priority. When
the search for the lowest priority runqueue is completed, all
the bits up to the first CPU with the lowest priority runqueue
are cleared.

Signed-off-by: Steven Rostedt <[email protected]>

---
kernel/sched_rt.c | 48 ++++++++++++++++++++++++++++++++++++------------
1 file changed, 36 insertions(+), 12 deletions(-)

Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c 2007-11-20 19:53:15.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c 2007-11-20 20:35:04.000000000 -0500
@@ -293,29 +293,36 @@ static struct task_struct *pick_next_hig
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
- int cpu;
- cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
int lowest_prio = -1;
+ int lowest_cpu = 0;
int count = 0;
+ int cpu;

- cpus_clear(*lowest_mask);
- cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
+ cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);

/*
* Scan each rq for the lowest prio.
*/
- for_each_cpu_mask(cpu, *valid_mask) {
+ for_each_cpu_mask(cpu, *lowest_mask) {
struct rq *rq = cpu_rq(cpu);

/* We look for lowest RT prio or non-rt CPU */
if (rq->rt.highest_prio >= MAX_RT_PRIO) {
- if (count)
+ /*
+ * if we already found a low RT queue
+ * and now we found this non-rt queue
+ * clear the mask and set our bit.
+ * Otherwise just return the queue as is
+ * and the count==1 will cause the algorithm
+ * to use the first bit found.
+ */
+ if (lowest_cpu) {
cpus_clear(*lowest_mask);
- cpu_set(rq->cpu, *lowest_mask);
+ cpu_set(rq->cpu, *lowest_mask);
+ }
return 1;
}

@@ -325,13 +332,30 @@ static int find_lowest_cpus(struct task_
if (rq->rt.highest_prio > lowest_prio) {
/* new low - clear old data */
lowest_prio = rq->rt.highest_prio;
- if (count) {
- cpus_clear(*lowest_mask);
- count = 0;
- }
+ lowest_cpu = cpu;
+ count = 0;
}
cpu_set(rq->cpu, *lowest_mask);
count++;
+ } else
+ cpu_clear(cpu, *lowest_mask);
+ }
+
+ /*
+ * Clear out all the set bits that represent
+ * runqueues that were of higher prio than
+ * the lowest_prio.
+ */
+ if (lowest_cpu) {
+ /*
+ * Perhaps we could add another cpumask op to
+ * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+ * Then that could be optimized to use memset and such.
+ */
+ for_each_cpu_mask(cpu, *lowest_mask) {
+ if (cpu >= lowest_cpu)
+ break;
+ cpu_clear(cpu, *lowest_mask);
}
}

2007-11-21 03:30:11

by Gregory Haskins

Subject: [PATCH] Fix optimized search

I spied a few more issues from http://lkml.org/lkml/2007/11/20/590.

Patch is below..

Regards,
-Greg

-----------------

Include cpu 0 in the search, and eliminate the redundant cpu_set since
the bit should already be set in the mask.

Signed-off-by: Gregory Haskins <[email protected]>
---

kernel/sched_rt.c | 7 +++----
1 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 28feeff..fbf4fb1 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -297,7 +297,7 @@ static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
int lowest_prio = -1;
- int lowest_cpu = 0;
+ int lowest_cpu = -1;
int count = 0;
int cpu;

@@ -319,7 +319,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
* and the count==1 will cause the algorithm
* to use the first bit found.
*/
- if (lowest_cpu) {
+ if (lowest_cpu != -1) {
cpus_clear(*lowest_mask);
cpu_set(rq->cpu, *lowest_mask);
}
@@ -335,7 +335,6 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
lowest_cpu = cpu;
count = 0;
}
- cpu_set(rq->cpu, *lowest_mask);
count++;
} else
cpu_clear(cpu, *lowest_mask);
@@ -346,7 +345,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
* runqueues that were of higher prio than
* the lowest_prio.
*/
- if (lowest_cpu) {
+ if (lowest_cpu != -1) {
/*
* Perhaps we could add another cpumask op to
* zero out bits. Like cpu_zero_bits(cpumask, nrbits);

2007-11-21 04:16:19

by Steven Rostedt

Subject: Re: [PATCH] Fix optimized search

Gregory Haskins wrote:
> I spied a few more issues from http://lkml.org/lkml/2007/11/20/590.
>
> Patch is below..

Thanks, but I have one update...

>
> Regards,
> -Greg
>
> -----------------
>
> Include cpu 0 in the search, and eliminate the redundant cpu_set since
> the bit should already be set in the mask.
>
> Signed-off-by: Gregory Haskins <[email protected]>
> ---
>
> kernel/sched_rt.c | 7 +++----
> 1 files changed, 3 insertions(+), 4 deletions(-)
>
> diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
> index 28feeff..fbf4fb1 100644
> --- a/kernel/sched_rt.c
> +++ b/kernel/sched_rt.c
> @@ -297,7 +297,7 @@ static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
> static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
> {
> int lowest_prio = -1;
> - int lowest_cpu = 0;
> + int lowest_cpu = -1;
> int count = 0;
> int cpu;
>
> @@ -319,7 +319,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
> * and the count==1 will cause the algorithm
> * to use the first bit found.
> */
> - if (lowest_cpu) {
> + if (lowest_cpu != -1) {
> cpus_clear(*lowest_mask);
> cpu_set(rq->cpu, *lowest_mask);
> }
> @@ -335,7 +335,6 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
> lowest_cpu = cpu;
> count = 0;
> }
> - cpu_set(rq->cpu, *lowest_mask);
> count++;
> } else
> cpu_clear(cpu, *lowest_mask);
> @@ -346,7 +345,7 @@ static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
> * runqueues that were of higher prio than
> * the lowest_prio.
> */
> - if (lowest_cpu) {
> + if (lowest_cpu != -1) {

We can change this to

if (lowest_cpu > 0) {

because if lowest_cpu == 0, we don't need to bother with clearing any bits.

I'll apply this next.

Thanks.

-- Steve

> /*
> * Perhaps we could add another cpumask op to
> * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
>

2007-11-21 04:27:11

by Steven Rostedt

Subject: Re: [PATCH] Fix optimized search

On Tue, Nov 20, 2007 at 11:15:48PM -0500, Steven Rostedt wrote:
> Gregory Haskins wrote:
>> I spied a few more issues from http://lkml.org/lkml/2007/11/20/590.
>> Patch is below..
>
> Thanks, but I have one update...
>

Here's the updated patch.

Oh, and Gregory, please email me at my [email protected] account. It
has better filters ;-)

This series is at:

http://rostedt.homelinux.com/rt/rt-balance-patches-v6.tar.bz2

===

This patch removes several cpumask operations by keeping track
of the first of the CPUs that is of the lowest priority. When
the search for the lowest priority runqueue is completed, all
the bits up to the first CPU with the lowest priority runqueue
are cleared.

Signed-off-by: Steven Rostedt <[email protected]>

---
kernel/sched_rt.c | 49 ++++++++++++++++++++++++++++++++++++-------------
1 file changed, 36 insertions(+), 13 deletions(-)

Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c 2007-11-20 23:17:43.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c 2007-11-20 23:18:21.000000000 -0500
@@ -293,29 +293,36 @@ static struct task_struct *pick_next_hig
}

static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
-static DEFINE_PER_CPU(cpumask_t, valid_cpu_mask);

static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
{
- int cpu;
- cpumask_t *valid_mask = &__get_cpu_var(valid_cpu_mask);
int lowest_prio = -1;
+ int lowest_cpu = -1;
int count = 0;
+ int cpu;

- cpus_clear(*lowest_mask);
- cpus_and(*valid_mask, cpu_online_map, task->cpus_allowed);
+ cpus_and(*lowest_mask, cpu_online_map, task->cpus_allowed);

/*
* Scan each rq for the lowest prio.
*/
- for_each_cpu_mask(cpu, *valid_mask) {
+ for_each_cpu_mask(cpu, *lowest_mask) {
struct rq *rq = cpu_rq(cpu);

/* We look for lowest RT prio or non-rt CPU */
if (rq->rt.highest_prio >= MAX_RT_PRIO) {
- if (count)
+ /*
+ * if we already found a low RT queue
+ * and now we found this non-rt queue
+ * clear the mask and set our bit.
+ * Otherwise just return the queue as is
+ * and the count==1 will cause the algorithm
+ * to use the first bit found.
+ */
+ if (lowest_cpu != -1) {
cpus_clear(*lowest_mask);
- cpu_set(rq->cpu, *lowest_mask);
+ cpu_set(rq->cpu, *lowest_mask);
+ }
return 1;
}

@@ -325,13 +332,29 @@ static int find_lowest_cpus(struct task_
if (rq->rt.highest_prio > lowest_prio) {
/* new low - clear old data */
lowest_prio = rq->rt.highest_prio;
- if (count) {
- cpus_clear(*lowest_mask);
- count = 0;
- }
+ lowest_cpu = cpu;
+ count = 0;
}
- cpu_set(rq->cpu, *lowest_mask);
count++;
+ } else
+ cpu_clear(cpu, *lowest_mask);
+ }
+
+ /*
+ * Clear out all the set bits that represent
+ * runqueues that were of higher prio than
+ * the lowest_prio.
+ */
+ if (lowest_cpu > 0) {
+ /*
+ * Perhaps we could add another cpumask op to
+ * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
+ * Then that could be optimized to use memset and such.
+ */
+ for_each_cpu_mask(cpu, *lowest_mask) {
+ if (cpu >= lowest_cpu)
+ break;
+ cpu_clear(cpu, *lowest_mask);
}
}
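
As a quick sanity check of the final logic, here is a toy userspace model
of the search. The priorities are made up, an unsigned long stands in for
the cpumask, and the comparison against the migrating task's own priority
is omitted, so this only illustrates the mask bookkeeping:

	#include <stdio.h>

	#define MAX_RT_PRIO 100	/* as in the kernel; >= this means non-RT */

	int main(void)
	{
		/* made-up per-CPU highest_prio values; larger number == lower prio */
		int prio[8] = { 50, 90, 90, 50, 90, 50, 90, 90 };
		unsigned long mask = 0xff;	/* online & allowed: all 8 CPUs */
		int lowest_prio = -1, lowest_cpu = -1, count = 0, cpu;

		for (cpu = 0; cpu < 8; cpu++) {
			if (!(mask & (1UL << cpu)))
				continue;
			if (prio[cpu] >= MAX_RT_PRIO) {	/* non-RT queue: take it and stop */
				if (lowest_cpu != -1)
					mask = 1UL << cpu;
				count = 1;
				break;
			}
			if (prio[cpu] >= lowest_prio) {
				if (prio[cpu] > lowest_prio) {
					/* new low - remember where it starts */
					lowest_prio = prio[cpu];
					lowest_cpu = cpu;
					count = 0;
				}
				count++;
			} else
				mask &= ~(1UL << cpu);	/* higher prio than the current low */
		}

		/* clear the stale bits below the first lowest-prio CPU */
		if (lowest_cpu > 0)
			for (cpu = 0; cpu < lowest_cpu; cpu++)
				mask &= ~(1UL << cpu);

		printf("mask=0x%lx count=%d\n", mask, count);	/* prints mask=0xd6 count=5 */
		return 0;
	}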

2007-11-21 05:19:00

by Gregory Haskins

Subject: Re: [PATCH] Fix optimized search

>>> On Tue, Nov 20, 2007 at 11:26 PM, in message
<[email protected]>, Steven Rostedt <[email protected]>
wrote:
> On Tue, Nov 20, 2007 at 11:15:48PM -0500, Steven Rostedt wrote:
>> Gregory Haskins wrote:
>>> I spied a few more issues from http://lkml.org/lkml/2007/11/20/590.
>>> Patch is below..
>>
>> Thanks, but I have one update...
>>
>
> Here's the updated patch.
>
> Oh, and Gregory, please email me at my [email protected] account. It
> has better filters ;-)
>
> This series is at:
>
> http://rostedt.homelinux.com/rt/rt-balance-patches-v6.tar.bz2

Ah.. mails crossed. ;) Ignore my patch #1 from the 0/4 series I just sent out.

Regards,
-Greg