After downing/upping a cpu, an attempt to set
/proc/sys/vm/percpu_pagelist_fraction results in an oops in
percpu_pagelist_fraction_sysctl_handler().
If a processor is downed then we need to set the pageset pointer back to the
boot pageset.
Updates of the high water marks should not access pagesets of unpopulated zones
(those pointers go to the boot pagesets, which would no longer be functional if
their size were increased beyond zero).
Signed-off-by: Dimitri Sivanich <[email protected]>
Signed-off-by: Christoph Lameter <[email protected]>
---
This patch should go into the -stable tree as well.
mm/page_alloc.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
Index: linux/mm/page_alloc.c
===================================================================
--- linux.orig/mm/page_alloc.c 2009-06-22 13:06:50.000000000 -0500
+++ linux/mm/page_alloc.c 2009-06-22 13:08:09.000000000 -0500
@@ -3026,7 +3026,7 @@ bad:
if (dzone == zone)
break;
kfree(zone_pcp(dzone, cpu));
- zone_pcp(dzone, cpu) = NULL;
+ zone_pcp(dzone, cpu) = &boot_pageset[cpu];
}
return -ENOMEM;
}
@@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(in
/* Free per_cpu_pageset if it is slab allocated */
if (pset != &boot_pageset[cpu])
kfree(pset);
- zone_pcp(zone, cpu) = NULL;
+ zone_pcp(zone, cpu) = &boot_pageset[cpu];
}
}
@@ -4659,7 +4659,7 @@ int percpu_pagelist_fraction_sysctl_hand
ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
if (!write || (ret == -EINVAL))
return ret;
- for_each_zone(zone) {
+ for_each_populated_zone(zone) {
for_each_online_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
> After downing/upping a cpu, an attempt to set
> /proc/sys/vm/percpu_pagelist_fraction results in an oops in
> percpu_pagelist_fraction_sysctl_handler().
>
> If a processor is downed then we need to set the pageset pointer back to the
> boot pageset.
>
> Updates of the high water marks should not access pagesets of unpopulated zones
> (those pointers go to the boot pagesets, which would no longer be functional if
> their size were increased beyond zero).
>
> Signed-off-by: Dimitri Sivanich <[email protected]>
> Signed-off-by: Christoph Lameter <[email protected]>
>
> ---
>
> This patch should go into the -stable tree as well.
>
> mm/page_alloc.c | 6 +++---
> 1 file changed, 3 insertions(+), 3 deletions(-)
>
> Index: linux/mm/page_alloc.c
> ===================================================================
> --- linux.orig/mm/page_alloc.c 2009-06-22 13:06:50.000000000 -0500
> +++ linux/mm/page_alloc.c 2009-06-22 13:08:09.000000000 -0500
> @@ -3026,7 +3026,7 @@ bad:
> if (dzone == zone)
> break;
> kfree(zone_pcp(dzone, cpu));
> - zone_pcp(dzone, cpu) = NULL;
> + zone_pcp(dzone, cpu) = &boot_pageset[cpu];
> }
> return -ENOMEM;
> }
> @@ -3041,7 +3041,7 @@ static inline void free_zone_pagesets(in
> /* Free per_cpu_pageset if it is slab allocated */
> if (pset != &boot_pageset[cpu])
> kfree(pset);
> - zone_pcp(zone, cpu) = NULL;
> + zone_pcp(zone, cpu) = &boot_pageset[cpu];
> }
> }
>
> @@ -4659,7 +4659,7 @@ int percpu_pagelist_fraction_sysctl_hand
> ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
> if (!write || (ret == -EINVAL))
> return ret;
> - for_each_zone(zone) {
> + for_each_populated_zone(zone) {
> for_each_online_cpu(cpu) {
> unsigned long high;
> high = zone->present_pages / percpu_pagelist_fraction;
Looks good. Thanks for making multiple improvements.
Reviewed-by: KOSAKI Motohiro <[email protected]>