Date: Thu, 20 Oct 2005 15:59:40 -0700 (PDT)
From: Christoph Lameter
To: akpm@osdl.org
Cc: Mike Kravetz, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Christoph Lameter, Magnus Damm, Marcelo Tosatti
Message-Id: <20051020225940.19761.93396.sendpatchset@schroedinger.engr.sgi.com>
In-Reply-To: <20051020225935.19761.57434.sendpatchset@schroedinger.engr.sgi.com>
References: <20051020225935.19761.57434.sendpatchset@schroedinger.engr.sgi.com>
Subject: [PATCH 1/4] Swap migration V3: LRU operations

Implement functions to isolate pages from the LRU and put them back later.

From Magnus: This patch for 2.6.14-rc4-mm1 breaks out isolate_lru_page() and putback_lru_page() and makes them inline. I'd like to build my code on top of this patch, and I think your page eviction code could be built on top of it too, without introducing too much duplicated code.

Signed-off-by: Magnus Damm
Signed-off-by: Christoph Lameter

Index: linux-2.6.14-rc4-mm1/include/linux/mm_inline.h
===================================================================
--- linux-2.6.14-rc4-mm1.orig/include/linux/mm_inline.h	2005-10-10 18:19:19.000000000 -0700
+++ linux-2.6.14-rc4-mm1/include/linux/mm_inline.h	2005-10-20 10:45:40.000000000 -0700
@@ -38,3 +38,55 @@ del_page_from_lru(struct zone *zone, str
 		zone->nr_inactive--;
 	}
 }
+
+/*
+ * Isolate one page from the LRU lists.
+ *
+ * - zone->lru_lock must be held
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list
+ * -1 = page is being freed elsewhere.
+ */
+static inline int
+__isolate_lru_page(struct zone *zone, struct page *page)
+{
+	if (TestClearPageLRU(page)) {
+		if (get_page_testone(page)) {
+			/*
+			 * It is being freed elsewhere
+			 */
+			__put_page(page);
+			SetPageLRU(page);
+			return -1;
+		} else {
+			if (PageActive(page))
+				del_page_from_active_list(zone, page);
+			else
+				del_page_from_inactive_list(zone, page);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Add an isolated page back on the LRU lists
+ *
+ * - zone->lru_lock must be held
+ * - page must already be removed from any other list
+ * - an additional put_page() is needed to drop the isolation reference
+ */
+static inline void
+__putback_lru_page(struct zone *zone, struct page *page)
+{
+	if (TestSetPageLRU(page))
+		BUG();
+
+	if (PageActive(page))
+		add_page_to_active_list(zone, page);
+	else
+		add_page_to_inactive_list(zone, page);
+}
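To summarize the contract of the two inlines: a successful __isolate_lru_page() removes the page from its LRU list and leaves the caller holding one extra reference (taken via get_page_testone()), which must be dropped with put_page() after __putback_lru_page(). A minimal sketch, with a hypothetical helper name that is not part of this patch:

#include <linux/mm.h>
#include <linux/mm_inline.h>

/*
 * Hypothetical illustration only (not part of this patch): take @page
 * off the LRU and put it straight back, honoring the locking and
 * refcount rules of the two inlines above.
 */
static int park_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	int rc;

	spin_lock_irq(&zone->lru_lock);
	rc = __isolate_lru_page(zone, page);
	if (rc == 1)
		__putback_lru_page(zone, page);
	spin_unlock_irq(&zone->lru_lock);

	if (rc == 1)
		put_page(page);	/* drop the get_page_testone() reference */
	return rc;
}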
Index: linux-2.6.14-rc4-mm1/mm/vmscan.c
===================================================================
--- linux-2.6.14-rc4-mm1.orig/mm/vmscan.c	2005-10-17 10:24:30.000000000 -0700
+++ linux-2.6.14-rc4-mm1/mm/vmscan.c	2005-10-20 13:18:05.000000000 -0700
@@ -573,43 +573,75 @@ keep:
  *
  * Appropriate locks must be held before calling this function.
  *
+ * @zone:	The zone where lru_lock is held.
  * @nr_to_scan:	The number of pages to look through on the list.
  * @src:	The LRU list to pull pages off.
  * @dst:	The temp list to put pages on to.
- * @scanned:	The number of pages that were scanned.
  *
- * returns how many pages were moved onto *@dst.
+ * returns the number of pages that were scanned.
  */
-static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
-			     struct list_head *dst, int *scanned)
+static int isolate_lru_pages(struct zone *zone, int nr_to_scan,
+			     struct list_head *src, struct list_head *dst)
 {
-	int nr_taken = 0;
 	struct page *page;
-	int scan = 0;
+	int scanned = 0;
+	int rc;
 
-	while (scan++ < nr_to_scan && !list_empty(src)) {
+	while (scanned++ < nr_to_scan && !list_empty(src)) {
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It is being freed elsewhere
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, src);
-			continue;
-		} else {
+		rc = __isolate_lru_page(zone, page);
+
+		BUG_ON(rc == 0);	/* PageLRU(page) must be true */
+
+		if (rc == 1)		/* succeeded in isolating the page */
 			list_add(&page->lru, dst);
-			nr_taken++;
+
+		if (rc == -1) {		/* not possible to isolate */
+			/* rotate the page to the head of src so we progress */
+			list_del(&page->lru);
+			list_add(&page->lru, src);
 		}
 	}
 
-	*scanned = scan;
-	return nr_taken;
+	return scanned;
+}
+
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Isolate one page from the LRU lists and put it on the
+ * indicated list.  Do the necessary pagevec draining if the
+ * page is not on the LRU lists yet.
+ *
+ * Result:
+ *  0 = page not on LRU list
+ *  1 = page removed from LRU list and added to the specified list.
+ * -1 = page is being freed elsewhere.
+ */
+int isolate_lru_page(struct page *page, struct list_head *l)
+{
+	int rc = 0;
+	struct zone *zone = page_zone(page);
+
+redo:
+	spin_lock_irq(&zone->lru_lock);
+	rc = __isolate_lru_page(zone, page);
+	if (rc == 1)
+		/* put the page on the caller's list, as documented above */
+		list_add(&page->lru, l);
+	spin_unlock_irq(&zone->lru_lock);
+	if (rc == 0) {
+		/*
+		 * Maybe this page is still waiting for a cpu to drain it
+		 * from one of the lru lists?
+		 */
+		smp_call_function(&lru_add_drain_per_cpu, NULL, 0, 1);
+		lru_add_drain();
+		if (PageLRU(page))
+			goto redo;
+	}
+	return rc;
 }
 
 /*
@@ -627,18 +659,15 @@ static void shrink_cache(struct zone *zo
 	spin_lock_irq(&zone->lru_lock);
 	while (max_scan > 0) {
 		struct page *page;
-		int nr_taken;
 		int nr_scan;
 		int nr_freed;
 
-		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
-					     &zone->inactive_list,
-					     &page_list, &nr_scan);
-		zone->nr_inactive -= nr_taken;
+		nr_scan = isolate_lru_pages(zone, sc->swap_cluster_max,
+					    &zone->inactive_list, &page_list);
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
 
-		if (nr_taken == 0)
+		if (list_empty(&page_list))
 			goto done;
 
 		max_scan -= nr_scan;
@@ -658,13 +687,9 @@ static void shrink_cache(struct zone *zo
 		 */
 		while (!list_empty(&page_list)) {
 			page = lru_to_page(&page_list);
-			if (TestSetPageLRU(page))
-				BUG();
 			list_del(&page->lru);
-			if (PageActive(page))
-				add_page_to_active_list(zone, page);
-			else
-				add_page_to_inactive_list(zone, page);
+			__putback_lru_page(zone, page);
+
 			if (!pagevec_add(&pvec, page)) {
 				spin_unlock_irq(&zone->lru_lock);
 				__pagevec_release(&pvec);
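A note on the rc == 0 retry in isolate_lru_page() above: a page freshly queued with lru_cache_add() may still sit in a per-cpu pagevec with PageLRU clear, i.e. it is on its way to the LRU but not yet visible there. A hedged sketch of that window (demo function hypothetical, not part of the patch):

#include <linux/swap.h>

/*
 * Hypothetical illustration of the per-cpu pagevec window.
 */
static void pagevec_window_demo(struct page *page)
{
	lru_cache_add(page);	/* queued in this cpu's pagevec;
				 * PageLRU(page) may still be clear here */
	lru_add_drain();	/* flushes this cpu's pagevecs only */
	/*
	 * Another cpu may still hold the page in its pagevec, which is
	 * why isolate_lru_page() runs lru_add_drain() on the other cpus
	 * via smp_call_function() before rechecking PageLRU.
	 */
}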
@@ -678,6 +703,33 @@ done:
 }
 
 /*
+ * Add isolated pages on the list back to the LRU.
+ * Determines the zone for each page and takes
+ * the necessary lru lock for each page.
+ *
+ * returns the number of pages put back.
+ */
+int putback_lru_pages(struct list_head *l)
+{
+	struct page *page;
+	struct page *page2;
+	int count = 0;
+
+	list_for_each_entry_safe(page, page2, l, lru) {
+		struct zone *zone = page_zone(page);
+
+		list_del(&page->lru);
+		spin_lock_irq(&zone->lru_lock);
+		__putback_lru_page(zone, page);
+		spin_unlock_irq(&zone->lru_lock);
+		count++;
+		/* undo the reference taken in isolate_lru_page() */
+		put_page(page);
+	}
+	return count;
+}
+
+/*
  * This moves pages from the active list to the inactive list.
  *
  * We move them the other way if the page is referenced by one or more
@@ -713,10 +765,9 @@ refill_inactive_zone(struct zone *zone,
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
-				    &l_hold, &pgscanned);
+	pgscanned = isolate_lru_pages(zone, nr_pages,
+				      &zone->active_list, &l_hold);
 	zone->pages_scanned += pgscanned;
-	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
 
 	/*
Index: linux-2.6.14-rc4-mm1/include/linux/swap.h
===================================================================
--- linux-2.6.14-rc4-mm1.orig/include/linux/swap.h	2005-10-17 10:24:16.000000000 -0700
+++ linux-2.6.14-rc4-mm1/include/linux/swap.h	2005-10-20 13:13:24.000000000 -0700
@@ -176,6 +176,9 @@ extern int zone_reclaim(struct zone *, u
 extern int shrink_all_memory(int);
 extern int vm_swappiness;
 
+extern int isolate_lru_page(struct page *p, struct list_head *l);
+extern int putback_lru_pages(struct list_head *l);
+
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
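Taken together, the new interface is meant to be used roughly like this (a sketch only; the function name and page source are hypothetical, and the real callers arrive in the following swap-migration patches):

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/swap.h>

/*
 * Hypothetical caller: pull a set of pages off the LRU onto a private
 * list, process them, then return the leftovers to the LRU.
 */
static int process_pages_demo(struct page **pages, int nr)
{
	LIST_HEAD(pagelist);
	int i;

	for (i = 0; i < nr; i++)
		/* 1 = isolated onto &pagelist, 0/-1 = page is skipped */
		isolate_lru_page(pages[i], &pagelist);

	/* ... evict or migrate the pages on &pagelist here ... */

	/* put the remaining pages back and drop the isolation refs */
	return putback_lru_pages(&pagelist);
}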