shrink_cache() and refill_inactive_zone() do not need loops.
Simplify them to scan one chunk at a time.

The chunk size sc->nr_to_scan is always SWAP_CLUSTER_MAX, and the
caller already loops over the chunks, so each call only ever has a
single chunk's worth of pages to scan.
Signed-off-by: Wu Fengguang <[email protected]>
---
mm/vmscan.c | 92 ++++++++++++++++++++++++++++--------------------------------
1 files changed, 43 insertions(+), 49 deletions(-)
--- linux.orig/mm/vmscan.c
+++ linux/mm/vmscan.c
@@ -900,63 +900,58 @@ static void shrink_cache(struct zone *zo
{
LIST_HEAD(page_list);
struct pagevec pvec;
- int max_scan = sc->nr_to_scan;
+ struct page *page;
+ int nr_taken;
+ int nr_scan;
+ int nr_freed;
pagevec_init(&pvec, 1);
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
- while (max_scan > 0) {
- struct page *page;
- int nr_taken;
- int nr_scan;
- int nr_freed;
-
- nr_taken = isolate_lru_pages(sc->nr_to_scan,
- &zone->inactive_list,
- &page_list, &nr_scan);
- zone->nr_inactive -= nr_taken;
- zone->pages_scanned += nr_scan;
- update_zone_age(zone, nr_scan);
- spin_unlock_irq(&zone->lru_lock);
+ nr_taken = isolate_lru_pages(sc->nr_to_scan,
+ &zone->inactive_list,
+ &page_list, &nr_scan);
+ zone->nr_inactive -= nr_taken;
+ zone->pages_scanned += nr_scan;
+ update_zone_age(zone, nr_scan);
+ spin_unlock_irq(&zone->lru_lock);
- if (nr_taken == 0)
- goto done;
+ if (nr_taken == 0)
+ return;
- max_scan -= nr_scan;
- sc->nr_scanned += nr_scan;
- if (current_is_kswapd())
- mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
- else
- mod_page_state_zone(zone, pgscan_direct, nr_scan);
- nr_freed = shrink_list(&page_list, sc);
- if (current_is_kswapd())
- mod_page_state(kswapd_steal, nr_freed);
- mod_page_state_zone(zone, pgsteal, nr_freed);
- sc->nr_to_reclaim -= nr_freed;
+ sc->nr_scanned += nr_scan;
+ if (current_is_kswapd())
+ mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
+ else
+ mod_page_state_zone(zone, pgscan_direct, nr_scan);
+ nr_freed = shrink_list(&page_list, sc);
+ if (current_is_kswapd())
+ mod_page_state(kswapd_steal, nr_freed);
+ mod_page_state_zone(zone, pgsteal, nr_freed);
+ sc->nr_to_reclaim -= nr_freed;
- spin_lock_irq(&zone->lru_lock);
- /*
- * Put back any unfreeable pages.
- */
- while (!list_empty(&page_list)) {
- page = lru_to_page(&page_list);
- if (TestSetPageLRU(page))
- BUG();
- list_del(&page->lru);
- if (PageActive(page))
- add_page_to_active_list(zone, page);
- else
- add_page_to_inactive_list(zone, page);
- if (!pagevec_add(&pvec, page)) {
- spin_unlock_irq(&zone->lru_lock);
- __pagevec_release(&pvec);
- spin_lock_irq(&zone->lru_lock);
- }
+ spin_lock_irq(&zone->lru_lock);
+ /*
+ * Put back any unfreeable pages.
+ */
+ while (!list_empty(&page_list)) {
+ page = lru_to_page(&page_list);
+ if (TestSetPageLRU(page))
+ BUG();
+ list_del(&page->lru);
+ if (PageActive(page))
+ add_page_to_active_list(zone, page);
+ else
+ add_page_to_inactive_list(zone, page);
+ if (!pagevec_add(&pvec, page)) {
+ spin_unlock_irq(&zone->lru_lock);
+ __pagevec_release(&pvec);
+ spin_lock_irq(&zone->lru_lock);
}
- }
+ }
spin_unlock_irq(&zone->lru_lock);
-done:
+
pagevec_release(&pvec);
}
@@ -983,7 +978,6 @@ refill_inactive_zone(struct zone *zone,
int pgmoved;
int pgdeactivate = 0;
int pgscanned;
- int nr_pages = sc->nr_to_scan;
LIST_HEAD(l_hold); /* The pages which were snipped off */
LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */
LIST_HEAD(l_active); /* Pages to go onto the active_list */
@@ -996,7 +990,7 @@ refill_inactive_zone(struct zone *zone,
lru_add_drain();
spin_lock_irq(&zone->lru_lock);
- pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
+ pgmoved = isolate_lru_pages(sc->nr_to_scan, &zone->active_list,
&l_hold, &pgscanned);
zone->pages_scanned += pgscanned;
zone->nr_active -= pgmoved;
--
On Wed, Dec 07, 2005 at 06:48:04PM +0800, Wu Fengguang wrote:
> shrink_cache() and refill_inactive_zone() do not need loops.
>
> Simplify them to scan one chunk at a time.
Hi Wu,
What is the purpose of scanning large chunks at a time?
Some drawbacks that I can think of by doing that:
- zone->lru_lock will be held for much longer periods, resulting in
decreased responsiveness and possible slowdowns (see the sketch below).
- if the task doing the scan is incapable of certain operations, for
instance IO, dirty pages will be moved back to the head of the inactive
list in much larger batches than they were before. This could hurt
reclaim in general.
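To illustrate the first point: isolate_lru_pages() walks the LRU under
zone->lru_lock, so the hold time grows with the chunk size it is asked
for. A rough sketch (simplified from the patched shrink_cache() above;
`nr' stands for whatever chunk size the function is handed):

	spin_lock_irq(&zone->lru_lock);
	/*
	 * Lock hold time is proportional to nr: the old loop dropped
	 * and retook the lock between chunks, while one large chunk
	 * would hold it for the whole scan.
	 */
	nr_taken = isolate_lru_pages(nr, &zone->inactive_list,
				     &page_list, &nr_scan);
	spin_unlock_irq(&zone->lru_lock);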
What were the results of this change, particularly with respect to
contention on the lru_lock on medium to large SMP systems?
Thanks!
On Thu, Jan 05, 2006 at 05:21:56PM -0200, Marcelo Tosatti wrote:
> On Wed, Dec 07, 2005 at 06:48:04PM +0800, Wu Fengguang wrote:
> > shrink_cache() and refill_inactive_zone() do not need loops.
> >
> > Simplify them to scan one chunk at a time.
>
> Hi Wu,
Hi Marcelo,
> What is the purpose of scanning large chunks at a time?
But I did not say or mean 'large' chunks :)
With the patch the chunk size is _always_ set to SWAP_CLUSTER_MAX (32),
the good old default value.
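The looping is simply done one level up: the caller already hands the
work out in SWAP_CLUSTER_MAX sized chunks. Roughly (a simplified sketch
of the shrink_zone() caller, not the exact code in this tree):

	while (nr_active || nr_inactive) {
		if (nr_active) {
			sc->nr_to_scan = min(nr_active,
					(unsigned long)SWAP_CLUSTER_MAX);
			nr_active -= sc->nr_to_scan;
			refill_inactive_zone(zone, sc);
		}
		if (nr_inactive) {
			sc->nr_to_scan = min(nr_inactive,
					(unsigned long)SWAP_CLUSTER_MAX);
			nr_inactive -= sc->nr_to_scan;
			shrink_cache(zone, sc);
		}
	}

So zone->lru_lock is still taken and dropped once per 32-page chunk,
just as before; only the redundant inner loops are gone.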
Thanks.
Wu