Keep this in place until the real fine-grained scan patch is ready :)
The following patches really need small scan quantities, at least in
normal situations.
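
To illustrate the idea, here is a minimal userspace C sketch of the bail-out
logic this patch adds to shrink_zone()/shrink_cache(). It is not the kernel
code: the struct fields shown, reclaim_chunk() and the page counts are made
up for illustration; only the nr_to_reclaim accounting mirrors the patch.

	#include <stdio.h>

	struct scan_control {
		int nr_to_reclaim;	/* stop scanning once this many pages are freed */
		int swap_cluster_max;	/* per-chunk scan batch size */
	};

	/* stand-in for shrink_cache(): pretend each chunk frees some pages */
	static int reclaim_chunk(int nr_to_scan)
	{
		return nr_to_scan / 2;	/* fake 50% reclaim success rate */
	}

	static void shrink_zone_sketch(struct scan_control *sc, int nr_inactive)
	{
		/* reclaim target for this zone pass, as in the patch */
		sc->nr_to_reclaim = sc->swap_cluster_max;

		while (nr_inactive > 0) {
			int nr_to_scan = nr_inactive < sc->swap_cluster_max ?
					 nr_inactive : sc->swap_cluster_max;

			nr_inactive -= nr_to_scan;
			sc->nr_to_reclaim -= reclaim_chunk(nr_to_scan);

			/* the point of the patch: stop once enough was freed */
			if (sc->nr_to_reclaim <= 0)
				break;
		}
		printf("stopped with %d pages of the target remaining\n",
		       sc->nr_to_reclaim);
	}

	int main(void)
	{
		struct scan_control sc = { .swap_cluster_max = 32 };

		shrink_zone_sketch(&sc, 1024);
		return 0;
	}

The effect is that each call scans in swap_cluster_max-sized batches and
stops as soon as roughly one batch worth of pages has been reclaimed,
rather than sweeping the whole inactive list.
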
Signed-off-by: Wu Fengguang <[email protected]>
---
mm/vmscan.c | 8 ++++++++
1 files changed, 8 insertions(+)
--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
unsigned long nr_mapped; /* From page_state */
+ /* How many pages shrink_cache() should reclaim */
+ int nr_to_reclaim;
+
/* Ask shrink_caches, or shrink_zone to scan at this priority */
unsigned int priority;
@@ -898,6 +901,7 @@ static void shrink_cache(struct zone *zo
if (current_is_kswapd())
mod_page_state(kswapd_steal, nr_freed);
mod_page_state_zone(zone, pgsteal, nr_freed);
+ sc->nr_to_reclaim -= nr_freed;
spin_lock_irq(&zone->lru_lock);
/*
@@ -1097,6 +1101,8 @@ shrink_zone(struct zone *zone, struct sc
else
nr_inactive = 0;
+ sc->nr_to_reclaim = sc->swap_cluster_max;
+
while (nr_active || nr_inactive) {
if (nr_active) {
sc->nr_to_scan = min(nr_active,
@@ -1110,6 +1116,8 @@ shrink_zone(struct zone *zone, struct sc
(unsigned long)sc->swap_cluster_max);
nr_inactive -= sc->nr_to_scan;
shrink_cache(zone, sc);
+ if (sc->nr_to_reclaim <= 0)
+ break;
}
}
--