Do not free the page in swap_page() to allow the page to be managed by
the caller of migrate_page().
If the page count dropped to 1 then rely on the next loop in migrate_pages()
to deal with the page or free it directly.
Some whitespace cleanup.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux-2.6.15-rc1-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.15-rc1-mm2.orig/mm/vmscan.c 2005-11-18 09:47:15.000000000 -0800
+++ linux-2.6.15-rc1-mm2/mm/vmscan.c 2005-11-18 10:04:05.000000000 -0800
@@ -627,41 +627,32 @@ static int swap_page(struct page *page)
case PAGE_KEEP:
case PAGE_ACTIVATE:
goto unlock_retry;
+
case PAGE_SUCCESS:
goto retry;
+
case PAGE_CLEAN:
; /* try to free the page below */
}
}
if (PagePrivate(page)) {
- if (!try_to_release_page(page, GFP_KERNEL))
+ if (!try_to_release_page(page, GFP_KERNEL) ||
+ (!mapping && page_count(page) == 1))
goto unlock_retry;
- if (!mapping && page_count(page) == 1)
- goto free_it;
}
- if (!remove_mapping(mapping, page))
- goto unlock_retry; /* truncate got there first */
-
-free_it:
- /*
- * We may free pages that were taken off the active list
- * by isolate_lru_page. However, free_hot_cold_page will check
- * if the active bit is set. So clear it.
- */
- ClearPageActive(page);
-
- list_del(&page->lru);
- unlock_page(page);
- put_page(page);
- return 0;
+ if (remove_mapping(mapping, page)) {
+ /* Success */
+ unlock_page(page);
+ return 0;
+ }
unlock_retry:
unlock_page(page);
retry:
- return 1;
+ return 1;
}
/*
* migrate_pages
Use -Exxx error codes instead of numeric return codes and clean up
the code in migrate_pages() accordingly.
Signed-off-by: Christoph Lameter <[email protected]>
Index: linux-2.6.15-rc1-mm2/mm/vmscan.c
===================================================================
--- linux-2.6.15-rc1-mm2.orig/mm/vmscan.c 2005-11-18 10:04:05.000000000 -0800
+++ linux-2.6.15-rc1-mm2/mm/vmscan.c 2005-11-18 10:12:01.000000000 -0800
@@ -608,10 +608,6 @@ int putback_lru_pages(struct list_head *
/*
* swapout a single page
* page is locked upon entry, unlocked on exit
- *
- * return codes:
- * 0 = complete
- * 1 = retry
*/
static int swap_page(struct page *page)
{
@@ -652,7 +648,7 @@ unlock_retry:
unlock_page(page);
retry:
- return 1;
+ return -EAGAIN;
}
/*
* migrate_pages
@@ -671,6 +667,8 @@ retry:
* is only swapping out pages and never touches the second
* list. The direct migration patchset
* extends this function to avoid the use of swap.
+ *
+ * Return: Number of pages not migrated when "to" ran empty.
*/
int migrate_pages(struct list_head *from, struct list_head *to,
struct list_head *moved, struct list_head *failed)
@@ -681,6 +679,7 @@ int migrate_pages(struct list_head *from
struct page *page;
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
+ int rc;
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
@@ -702,11 +701,12 @@ redo:
* use lock_page() to have a higher chance of acquiring the
* lock.
*/
+ rc = -EAGAIN;
if (pass > 2)
lock_page(page);
else
if (TestSetPageLocked(page))
- goto retry_later;
+ goto next;
/*
* Only wait on writeback if we have already done a pass where
@@ -715,18 +715,19 @@ redo:
if (pass > 0) {
wait_on_page_writeback(page);
} else {
- if (PageWriteback(page)) {
- unlock_page(page);
- goto retry_later;
- }
+ if (PageWriteback(page))
+ goto unlock_page;
}
+ /*
+ * Anonymous pages must have swap cache references otherwise
+ * the information contained in the page maps cannot be
+ * preserved.
+ */
if (PageAnon(page) && !PageSwapCache(page)) {
if (!add_to_swap(page, GFP_KERNEL)) {
- unlock_page(page);
- list_move(&page->lru, failed);
- nr_failed++;
- continue;
+ rc = -ENOMEM;
+ goto unlock_page;
}
}
@@ -738,8 +739,19 @@ redo:
list_move(&page->lru, moved);
continue;
}
-retry_later:
- retry++;
+
+unlock_page:
+ unlock_page(page);
+
+next:
+ if (rc == -EAGAIN)
+ retry++;
+
+ else if (rc) {
+ /* Permanent failure to migrate the page */
+ list_move(&page->lru, failed);
+ nr_failed++;
+ }
}
if (retry && pass++ < 10)
goto redo;