On 22 September 2012 00:02, <[email protected]> wrote:
> +config SCHED_HMP_PRIO_FILTER
> + bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
> + depends on SCHED_HMP
Should it depend on EXPERIMENTAL?
> + help
> + Enables task priority based HMP migration filter. Any task with
> + a NICE value above the threshold will always be on low-power cpus
> + with less compute capacity.
> +
> +config SCHED_HMP_PRIO_FILTER_VAL
> + int "NICE priority threshold"
> + default 5
> + depends on SCHED_HMP_PRIO_FILTER
> +
> config HAVE_ARM_SCU
> bool
> help
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 490f1f0..8f0f3b9 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3129,9 +3129,12 @@ static int __init hmp_cpu_mask_setup(void)
> * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
> * The default values (512, 256) offer good responsiveness, but may need
> * tweaking to suit particular needs.
> + *
> + * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
> */
> unsigned int hmp_up_threshold = 512;
> unsigned int hmp_down_threshold = 256;
> +unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
>
> static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
> static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
> @@ -5491,6 +5494,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
> if (hmp_cpu_is_fastest(cpu))
> return 0;
>
> +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> + /* Filter by task priority */
> + if (p->prio >= hmp_up_prio)
> + return 0;
> +#endif
> +
> if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
> tsk_cpus_allowed(p))
> && se->avg.load_avg_ratio > hmp_up_threshold) {
> @@ -5507,6 +5516,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
> if (hmp_cpu_is_slowest(cpu))
> return 0;
>
> +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> + /* Filter by task priority */
> + if (p->prio >= hmp_up_prio)
> + return 1;
> +#endif
Even if the cpumask_intersects() check below fails?
> if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
> tsk_cpus_allowed(p))
> && se->avg.load_avg_ratio < hmp_down_threshold) {
--
viresh
On Thu, Oct 04, 2012 at 07:27:00AM +0100, Viresh Kumar wrote:
> On 22 September 2012 00:02, <[email protected]> wrote:
>
> > +config SCHED_HMP_PRIO_FILTER
> > + bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
> > + depends on SCHED_HMP
>
> Should it depend on EXPERIMENTAL?
>
> > + help
> > + Enables task priority based HMP migration filter. Any task with
> > + a NICE value above the threshold will always be on low-power cpus
> > + with less compute capacity.
> > +
> > +config SCHED_HMP_PRIO_FILTER_VAL
> > + int "NICE priority threshold"
> > + default 5
> > + depends on SCHED_HMP_PRIO_FILTER
> > +
> > config HAVE_ARM_SCU
> > bool
> > help
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 490f1f0..8f0f3b9 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -3129,9 +3129,12 @@ static int __init hmp_cpu_mask_setup(void)
> > * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
> > * The default values (512, 256) offer good responsiveness, but may need
> > * tweaking to suit particular needs.
> > + *
> > + * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
> > */
> > unsigned int hmp_up_threshold = 512;
> > unsigned int hmp_down_threshold = 256;
> > +unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
> >
> > static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
> > static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
> > @@ -5491,6 +5494,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
> > if (hmp_cpu_is_fastest(cpu))
> > return 0;
> >
> > +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> > + /* Filter by task priority */
> > + if (p->prio >= hmp_up_prio)
> > + return 0;
> > +#endif
> > +
> > if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
> > tsk_cpus_allowed(p))
> > && se->avg.load_avg_ratio > hmp_up_threshold) {
> > @@ -5507,6 +5516,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
> > if (hmp_cpu_is_slowest(cpu))
> > return 0;
> >
> > +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> > + /* Filter by task priority */
> > + if (p->prio >= hmp_up_prio)
> > + return 1;
> > +#endif
>
> Even if below cpumask_intersects() fails?
>
No. Good catch :)
> > if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
> > tsk_cpus_allowed(p))
> > && se->avg.load_avg_ratio < hmp_down_threshold) {
>
> --
> viresh
>
Thanks,
Morten
在 2012-10-09二的 17:40 +0100,Morten Rasmussen写道:
> On Thu, Oct 04, 2012 at 07:27:00AM +0100, Viresh Kumar wrote:
> > On 22 September 2012 00:02, <[email protected]> wrote:
> >
> > > +config SCHED_HMP_PRIO_FILTER
> > > + bool "(EXPERIMENTAL) Filter HMP migrations by task priority"
> > > + depends on SCHED_HMP
> >
> > Should it depend on EXPERIMENTAL?
> >
> > > + help
> > > + Enables task priority based HMP migration filter. Any task with
> > > + a NICE value above the threshold will always be on low-power cpus
> > > + with less compute capacity.
> > > +
> > > +config SCHED_HMP_PRIO_FILTER_VAL
> > > + int "NICE priority threshold"
> > > + default 5
> > > + depends on SCHED_HMP_PRIO_FILTER
> > > +
> > > config HAVE_ARM_SCU
> > > bool
> > > help
> > > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > > index 490f1f0..8f0f3b9 100644
> > > --- a/kernel/sched/fair.c
> > > +++ b/kernel/sched/fair.c
> > > @@ -3129,9 +3129,12 @@ static int __init hmp_cpu_mask_setup(void)
> > > * hmp_down_threshold: max. load allowed for tasks migrating to a slower cpu
> > > * The default values (512, 256) offer good responsiveness, but may need
> > > * tweaking to suit particular needs.
> > > + *
> > > + * hmp_up_prio: Only up migrate task with high priority (<hmp_up_prio)
> > > */
> > > unsigned int hmp_up_threshold = 512;
> > > unsigned int hmp_down_threshold = 256;
Perhaps hmp_*_threshold should be renamed sysctl_hmp_*_threshold
and exposed under /proc/sys/kernel,
so the values can be tuned at runtime to sensible levels.
> > > +unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
> > >
> > > static unsigned int hmp_up_migration(int cpu, struct sched_entity *se);
> > > static unsigned int hmp_down_migration(int cpu, struct sched_entity *se);
> > > @@ -5491,6 +5494,12 @@ static unsigned int hmp_up_migration(int cpu, struct sched_entity *se)
> > > if (hmp_cpu_is_fastest(cpu))
> > > return 0;
> > >
> > > +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> > > + /* Filter by task priority */
> > > + if (p->prio >= hmp_up_prio)
> > > + return 0;
> > > +#endif
> > > +
> > > if (cpumask_intersects(&hmp_faster_domain(cpu)->cpus,
> > > tsk_cpus_allowed(p))
> > > && se->avg.load_avg_ratio > hmp_up_threshold) {
> > > @@ -5507,6 +5516,12 @@ static unsigned int hmp_down_migration(int cpu, struct sched_entity *se)
> > > if (hmp_cpu_is_slowest(cpu))
> > > return 0;
> > >
> > > +#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
> > > + /* Filter by task priority */
> > > + if (p->prio >= hmp_up_prio)
> > > + return 1;
> > > +#endif
> >
> > Even if below cpumask_intersects() fails?
> >
>
> No. Good catch :)
>
> > > if (cpumask_intersects(&hmp_slower_domain(cpu)->cpus,
> > > tsk_cpus_allowed(p))
> > > && se->avg.load_avg_ratio < hmp_down_threshold) {
> >
> > --
> > viresh
> >
>
> Thanks,
> Morten
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
liguang [email protected]
FNST linux kernel team