Hello,

This series tries to enable migration of non-LRU pages, such as driver pages.

My ARM-based platform suffered a severe fragmentation problem after long-term
(several days) tests. Sometimes even order-3 page allocations failed. The
platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
processing and 20% ~ 30% is reserved for zram.

I found that many pages of the GPU driver and zram are non-movable pages. So I
reported it to Minchan Kim, the maintainer of zram, and he implemented the
internal compaction logic of zram. I implemented the internal compaction of the
GPU driver.

They reduced some fragmentation but they are not effective enough. They are
activated via their own /sys interfaces, so they do not cooperate with kernel
compaction: when there is too much fragmentation and the kernel starts
compaction, zram and the GPU driver cannot take part in it.

First, this patch set adds generic isolate/migrate/putback callbacks to the
page's address_space. zram, the GPU driver, and any other module can register
their own migration methods, and kernel compaction can call the registered
callbacks while it runs. Therefore all pages in the system can be migrated at
once.
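
To illustrate, a driver that wants compaction to handle its pages would hook
in roughly as below (a minimal sketch with hypothetical "mydrv_*" names;
locking and error handling are omitted):

    /* sketch only: "mydrv" is a hypothetical driver, not part of this series */
    static const struct address_space_operations mydrv_aops = {
            .migratepage  = mydrv_migratepage,
            .isolatepage  = mydrv_isolatepage,
            .putbackpage  = mydrv_putbackpage,
    };

    /* at device init: flag the mapping so compaction may call back into us */
    mapping->a_ops = &mydrv_aops;
    mapping_set_migratable(mapping);

    /* for every driver-owned, movable page */
    page->mapping = mapping;
    __SetPageMigratable(page);

With the mapping flagged and the pages marked, compaction can reach the
driver's callbacks through page->mapping->a_ops.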

Second, the generic migration callbacks are applied to the balloon driver.
My GPU driver code is not open, so I apply the generic migration to the balloon
driver to show how it works. I've tested it with kvm-enabled qemu as follows:
- boot Ubuntu 14.04 with 1GB of memory on qemu
- start a kernel build
- after several seconds, check with the free command that more than 512MB is used
- run "balloon 512" in the qemu monitor
- check that hundreds of MB of pages are migrated

Next, the kernel compaction code is changed to call the generic migration
callbacks instead of the balloon driver interface.
Finally, the direct call into the balloon driver's migration is removed.

Gioh Kim (4):
mm/compaction: enable driver page migration
mm/balloon: apply migratable-page into balloon driver
mm/compaction: apply migratable-page into compaction
mm: remove direct migration of migratable-page
drivers/virtio/virtio_balloon.c | 2 +
fs/proc/page.c | 4 +-
include/linux/balloon_compaction.h | 42 +++++++++++++++------
include/linux/compaction.h | 13 +++++++
include/linux/fs.h | 2 +
include/linux/mm.h | 14 +++----
include/linux/pagemap.h | 27 ++++++++++++++
include/uapi/linux/kernel-page-flags.h | 2 +-
mm/balloon_compaction.c | 67 +++++++++++++++++++++++++++++-----
mm/compaction.c | 9 +++--
mm/migrate.c | 25 ++++---------
11 files changed, 154 insertions(+), 53 deletions(-)
--
1.9.1
Add a framework to register callback functions and to
check for migratable pages.

There are several page isolation modes, so the isolate interface
takes the page and an isolation mode as its arguments.
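
For illustration only, a driver-side isolate callback could use the mode
roughly as below; mydrv_page_busy() is a hypothetical helper and the driver's
own locking is omitted:

    static bool mydrv_isolatepage(struct page *page, isolate_mode_t mode)
    {
            /* e.g. don't stall async compaction on a busy page */
            if ((mode & ISOLATE_ASYNC_MIGRATE) && mydrv_page_busy(page))
                    return false;

            /*
             * Pin the page and take it off the driver's internal list;
             * on success compaction moves it to its own migratepages list.
             */
            if (!get_page_unless_zero(page))
                    return false;
            list_del(&page->lru);
            return true;
    }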
Signed-off-by: Gioh Kim <[email protected]>
---
include/linux/compaction.h | 13 +++++++++++++
include/linux/fs.h | 2 ++
include/linux/mm.h | 19 +++++++++++++++++++
include/linux/pagemap.h | 27 +++++++++++++++++++++++++++
4 files changed, 61 insertions(+)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index a014559..1acfa21 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,9 @@
#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H
+#include <linux/pagemap.h>
+#include <linux/mm.h>
+
/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was deferred due to past failures */
#define COMPACT_DEFERRED 0
@@ -50,6 +53,11 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+static inline bool driver_page_migratable(struct page *page)
+{
+ return PageMigratable(page) && mapping_migratable(page->mapping);
+}
+
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
@@ -82,6 +90,11 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline bool driver_page_migratable(struct page *page)
+{
+ return false;
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 52cc449..bdfcadf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -368,6 +368,8 @@ struct address_space_operations {
*/
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
+ bool (*isolatepage) (struct page *, isolate_mode_t);
+ void (*putbackpage) (struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 47a9392..422c484 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -618,6 +618,25 @@ static inline void __ClearPageBalloon(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+#define PAGE_MIGRATABLE_MAPCOUNT_VALUE (-256)
+
+static inline int PageMigratable(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_MIGRATABLE_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageMigratable(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_MIGRATABLE_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageMigratable(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageMigratable(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4b3736f..e924dfe 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -25,8 +25,35 @@ enum mapping_flags {
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
+ AS_MIGRATABLE = __GFP_BITS_SHIFT + 5,
};
+static inline void mapping_set_migratable(struct address_space *mapping)
+{
+ set_bit(AS_MIGRATABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_migratable(struct address_space *mapping)
+{
+ clear_bit(AS_MIGRATABLE, &mapping->flags);
+}
+
+static inline int __mapping_ops(struct address_space *mapping)
+{
+ /* migrating page should define all following methods */
+ return mapping->a_ops &&
+ mapping->a_ops->migratepage &&
+ mapping->a_ops->isolatepage &&
+ mapping->a_ops->putbackpage;
+}
+
+static inline int mapping_migratable(struct address_space *mapping)
+{
+ if (mapping && __mapping_ops(mapping))
+ return test_bit(AS_MIGRATABLE, &mapping->flags);
+ return !!mapping;
+}
+
static inline void mapping_set_error(struct address_space *mapping, int error)
{
if (unlikely(error)) {
--
1.9.1
Apply the driver page migration framework to the balloon driver.
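
For reference, the probe-time wiring ends up as in the sketch below.
balloon_mapping_alloc() can return ERR_PTR(-ENOMEM), so a caller may want to
check the result; the error label is only assumed here for illustration, and
the actual hunk below keeps the call minimal:

    #ifdef CONFIG_BALLOON_COMPACTION
            vb->vb_dev_info.migratepage = virtballoon_migratepage;
            vb->vb_dev_info.mapping = balloon_mapping_alloc(&vb->vb_dev_info,
                                                            &balloon_aops);
            if (IS_ERR(vb->vb_dev_info.mapping)) {
                    err = PTR_ERR(vb->vb_dev_info.mapping);
                    goto out_free_vb;       /* label name assumed */
            }
    #endif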
Signed-off-by: Gioh Kim <[email protected]>
---
drivers/virtio/virtio_balloon.c | 2 ++
fs/proc/page.c | 4 +--
include/linux/balloon_compaction.h | 42 ++++++++++++++++-------
include/linux/mm.h | 19 -----------
include/uapi/linux/kernel-page-flags.h | 2 +-
mm/balloon_compaction.c | 61 ++++++++++++++++++++++++++++++++--
6 files changed, 94 insertions(+), 36 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 6a356e3..cdd0038 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -496,6 +496,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
balloon_devinfo_init(&vb->vb_dev_info);
#ifdef CONFIG_BALLOON_COMPACTION
vb->vb_dev_info.migratepage = virtballoon_migratepage;
+ vb->vb_dev_info.mapping = balloon_mapping_alloc(&vb->vb_dev_info,
+ &balloon_aops);
#endif
err = init_vqs(vb);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7eee2d8..e741307 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -143,8 +143,8 @@ u64 stable_page_flags(struct page *page)
if (PageBuddy(page))
u |= 1 << KPF_BUDDY;
- if (PageBalloon(page))
- u |= 1 << KPF_BALLOON;
+ if (PageMigratable(page))
+ u |= 1 << KPF_MIGRATABLE;
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 9b0a15d..0989e96 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -48,6 +48,7 @@
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/fs.h>
/*
* Balloon device information descriptor.
@@ -62,6 +63,7 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
+ struct address_space *mapping;
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,24 +75,37 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
+ balloon->mapping = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+ isolate_mode_t mode);
extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage,
struct page *page, enum migrate_mode mode);
+extern struct address_space
+*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
+ const struct address_space_operations *a_ops);
+
+static inline void balloon_mapping_free(struct address_space *balloon_mapping)
+{
+ kfree(balloon_mapping);
+}
+
/*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
+ * __is_movable_balloon_page - helper to perform @page PageMigratable tests
*/
static inline bool __is_movable_balloon_page(struct page *page)
{
- return PageBalloon(page);
+ return PageMigratable(page);
}
/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
+ * balloon_page_movable - test PageMigratable to identify balloon pages
* and PagePrivate to check that the page is not
* isolated and can be moved by compaction/migration.
*
@@ -99,7 +114,7 @@ static inline bool __is_movable_balloon_page(struct page *page)
*/
static inline bool balloon_page_movable(struct page *page)
{
- return PageBalloon(page) && PagePrivate(page);
+ return PageMigratable(page) && PagePrivate(page);
}
/*
@@ -108,7 +123,7 @@ static inline bool balloon_page_movable(struct page *page)
*/
static inline bool isolated_balloon_page(struct page *page)
{
- return PageBalloon(page);
+ return PageMigratable(page);
}
/*
@@ -123,7 +138,8 @@ static inline bool isolated_balloon_page(struct page *page)
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
+ page->mapping = balloon->mapping;
+ __SetPageMigratable(page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
@@ -139,7 +155,8 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
*/
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ page->mapping = NULL;
+ __ClearPageMigratable(page);
set_page_private(page, 0);
if (PagePrivate(page)) {
ClearPagePrivate(page);
@@ -166,13 +183,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
- __SetPageBalloon(page);
+ __SetPageMigratable(page);
list_add(&page->lru, &balloon->pages);
}
static inline void balloon_page_delete(struct page *page)
{
- __ClearPageBalloon(page);
+ __ClearPageMigratable(page);
list_del(&page->lru);
}
@@ -191,7 +208,8 @@ static inline bool isolated_balloon_page(struct page *page)
return false;
}
-static inline bool balloon_page_isolate(struct page *page)
+static inline bool balloon_page_isolate(struct page *page,
+ isolate_mode_t mode)
{
return false;
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 422c484..2d991a0 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -599,25 +599,6 @@ static inline void __ClearPageBuddy(struct page *page)
atomic_set(&page->_mapcount, -1);
}
-#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
-
-static inline int PageBalloon(struct page *page)
-{
- return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
-}
-
-static inline void __SetPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
- atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
-}
-
-static inline void __ClearPageBalloon(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageBalloon(page), page);
- atomic_set(&page->_mapcount, -1);
-}
-
#define PAGE_MIGRATABLE_MAPCOUNT_VALUE (-256)
static inline int PageMigratable(struct page *page)
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index a6c4962..a6a3c4b 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -31,7 +31,7 @@
#define KPF_KSM 21
#define KPF_THP 22
-#define KPF_BALLOON 23
+#define KPF_MIGRATABLE 23
#define KPF_ZERO_PAGE 24
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index fcad832..f98a500 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -131,7 +131,7 @@ static inline void __putback_balloon_page(struct page *page)
}
/* __isolate_lru_page() counterpart for a ballooned page */
-bool balloon_page_isolate(struct page *page)
+bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
/*
* Avoid burning cycles with pages that are yet under __free_pages(),
@@ -175,6 +175,9 @@ bool balloon_page_isolate(struct page *page)
/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
+ if (!isolated_balloon_page(page))
+ return;
+
/*
* 'lock_page()' stabilizes the page and prevents races against
* concurrent isolation threads attempting to re-isolate it.
@@ -193,12 +196,16 @@ void balloon_page_putback(struct page *page)
}
/* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct page *newpage,
+int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage,
struct page *page, enum migrate_mode mode)
{
struct balloon_dev_info *balloon = balloon_page_device(page);
int rc = -EAGAIN;
+ if (!isolated_balloon_page(page))
+ return rc;
+
/*
* Block others from accessing the 'newpage' when we get around to
* establishing additional references. We should be the only one
@@ -218,4 +225,54 @@ int balloon_page_migrate(struct page *newpage,
unlock_page(newpage);
return rc;
}
+
+/* define the balloon_mapping->a_ops callback to allow balloon page migration */
+const struct address_space_operations balloon_aops = {
+ .migratepage = balloon_page_migrate,
+ .isolatepage = balloon_page_isolate,
+ .putbackpage = balloon_page_putback,
+};
+EXPORT_SYMBOL_GPL(balloon_aops);
+
+struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
+ const struct address_space_operations *a_ops)
+{
+ struct address_space *mapping;
+
+ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ if (!mapping)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Give a clean 'zeroed' status to all elements of this special
+ * balloon page->mapping struct address_space instance.
+ */
+ address_space_init_once(mapping);
+
+ /*
+ * Set mapping->flags appropriately, to allow balloon pages
+ * ->mapping identification.
+ */
+ mapping_set_migratable(mapping);
+ mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());
+
+ /* balloon's page->mapping->a_ops callback descriptor */
+ mapping->a_ops = a_ops;
+
+ /*
+ * Establish a pointer reference back to the balloon device descriptor
+ * this particular page->mapping will be servicing.
+ * This is used by compaction / migration procedures to identify and
+ * access the balloon device pageset while isolating / migrating pages.
+ *
+ * As some balloon drivers can register multiple balloon devices
+ * for a single guest, this also helps compaction / migration to
+ * properly deal with multiple balloon pagesets, when required.
+ */
+ mapping->private_data = b_dev_info;
+ b_dev_info->mapping = mapping;
+
+ return mapping;
+}
+EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
#endif /* CONFIG_BALLOON_COMPACTION */
--
1.9.1
Compaction now calls the driver page migration interfaces
instead of calling the balloon migration functions directly.
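
Condensed, the resulting call flow is the following (a summary of the hunks
below, not literal code):

    isolate_migratepages_block():                   /* mm/compaction.c */
            if (!PageLRU(page) && driver_page_migratable(page))
                    page->mapping->a_ops->isolatepage(page, isolate_mode);

    __unmap_and_move():                             /* mm/migrate.c */
            rc = page->mapping->a_ops->migratepage(page->mapping,
                                                   newpage, page, mode);

    putback_movable_pages():                        /* mm/migrate.c */
            page->mapping->a_ops->putbackpage(page);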
Signed-off-by: Gioh Kim <[email protected]>
---
mm/compaction.c | 9 +++++----
mm/migrate.c | 22 +++++++++++++---------
2 files changed, 18 insertions(+), 13 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8c0d945..ca666e2 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -14,7 +14,7 @@
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
+#include <linux/compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"
@@ -736,12 +736,13 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Check may be lockless but that's ok as we recheck later.
- * It's possible to migrate LRU pages and balloon pages
+ * It's possible to migrate LRU pages and migratable-pages
* Skip any other type of page
*/
if (!PageLRU(page)) {
- if (unlikely(balloon_page_movable(page))) {
- if (balloon_page_isolate(page)) {
+ if (unlikely(driver_page_migratable(page))) {
+ if (page->mapping->a_ops->isolatepage(page,
+ isolate_mode)) {
/* Successfully isolated */
goto isolate_success;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 85e0426..649b1cd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -35,7 +35,7 @@
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
-#include <linux/balloon_compaction.h>
+#include <linux/compaction.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
@@ -76,7 +76,8 @@ int migrate_prep_local(void)
* from where they were once taken off for compaction/migration.
*
* This function shall be used whenever the isolated pageset has been
- * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
+ * built from lru, migratable-page, hugetlbfs page.
+ * See isolate_migratepages_range()
* and isolate_huge_page().
*/
void putback_movable_pages(struct list_head *l)
@@ -92,8 +93,8 @@ void putback_movable_pages(struct list_head *l)
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (unlikely(isolated_balloon_page(page)))
- balloon_page_putback(page);
+ if (unlikely(driver_page_migratable(page)))
+ page->mapping->a_ops->putbackpage(page);
else
putback_lru_page(page);
}
@@ -843,15 +844,18 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
}
- if (unlikely(isolated_balloon_page(page))) {
+ if (unlikely(driver_page_migratable(page))) {
/*
- * A ballooned page does not need any special attention from
+ * A migratable-page does not need any special attention from
* physical to virtual reverse mapping procedures.
* Skip any attempt to unmap PTEs or to remap swap cache,
* in order to avoid burning cycles at rmap level, and perform
* the page migration right away (proteced by page lock).
*/
- rc = balloon_page_migrate(newpage, page, mode);
+ rc = page->mapping->a_ops->migratepage(page->mapping,
+ newpage,
+ page,
+ mode);
goto out_unlock;
}
@@ -948,8 +952,8 @@ out:
if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
ClearPageSwapBacked(newpage);
put_new_page(newpage, private);
- } else if (unlikely(__is_movable_balloon_page(newpage))) {
- /* drop our reference, page already in the balloon */
+ } else if (unlikely(driver_page_migratable(newpage))) {
+ /* drop our reference */
put_page(newpage);
} else
putback_lru_page(newpage);
--
1.9.1
Migration of driver pages is now completely generalized, so remove the
direct call into the balloon driver's migration code.
Signed-off-by: Gioh Kim <[email protected]>
---
mm/balloon_compaction.c | 8 --------
mm/migrate.c | 15 ---------------
2 files changed, 23 deletions(-)
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index f98a500..d29270aa 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -206,13 +206,6 @@ int balloon_page_migrate(struct address_space *mapping,
if (!isolated_balloon_page(page))
return rc;
- /*
- * Block others from accessing the 'newpage' when we get around to
- * establishing additional references. We should be the only one
- * holding a reference to the 'newpage' at this point.
- */
- BUG_ON(!trylock_page(newpage));
-
if (WARN_ON(!__is_movable_balloon_page(page))) {
dump_page(page, "not movable balloon page");
unlock_page(newpage);
@@ -222,7 +215,6 @@ int balloon_page_migrate(struct address_space *mapping,
if (balloon && balloon->migratepage)
rc = balloon->migratepage(balloon, newpage, page, mode);
- unlock_page(newpage);
return rc;
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 649b1cd..ca47b3e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -844,21 +844,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
}
- if (unlikely(driver_page_migratable(page))) {
- /*
- * A migratable-page does not need any special attention from
- * physical to virtual reverse mapping procedures.
- * Skip any attempt to unmap PTEs or to remap swap cache,
- * in order to avoid burning cycles at rmap level, and perform
- * the page migration right away (proteced by page lock).
- */
- rc = page->mapping->a_ops->migratepage(page->mapping,
- newpage,
- page,
- mode);
- goto out_unlock;
- }
-
/*
* Corner case handling:
* 1. When a new swap-cache page is read into, it is added to the LRU
--
1.9.1
On Tue, Jun 2, 2015 at 10:27 AM, Gioh Kim <[email protected]> wrote:
> Apply driver page migration into balloon driver.
>
> Signed-off-by: Gioh Kim <[email protected]>
> ---
> drivers/virtio/virtio_balloon.c | 2 ++
> fs/proc/page.c | 4 +--
> include/linux/balloon_compaction.h | 42 ++++++++++++++++-------
> include/linux/mm.h | 19 -----------
> include/uapi/linux/kernel-page-flags.h | 2 +-
> mm/balloon_compaction.c | 61 ++++++++++++++++++++++++++++++++--
> 6 files changed, 94 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
> index 6a356e3..cdd0038 100644
> --- a/drivers/virtio/virtio_balloon.c
> +++ b/drivers/virtio/virtio_balloon.c
> @@ -496,6 +496,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
> balloon_devinfo_init(&vb->vb_dev_info);
> #ifdef CONFIG_BALLOON_COMPACTION
> vb->vb_dev_info.migratepage = virtballoon_migratepage;
> + vb->vb_dev_info.mapping = balloon_mapping_alloc(&vb->vb_dev_info,
> + &balloon_aops);
> #endif
>
> err = init_vqs(vb);
> diff --git a/fs/proc/page.c b/fs/proc/page.c
> index 7eee2d8..e741307 100644
> --- a/fs/proc/page.c
> +++ b/fs/proc/page.c
> @@ -143,8 +143,8 @@ u64 stable_page_flags(struct page *page)
> if (PageBuddy(page))
> u |= 1 << KPF_BUDDY;
>
> - if (PageBalloon(page))
> - u |= 1 << KPF_BALLOON;
> + if (PageMigratable(page))
> + u |= 1 << KPF_MIGRATABLE;
>
> u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
>
> diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
> index 9b0a15d..0989e96 100644
> --- a/include/linux/balloon_compaction.h
> +++ b/include/linux/balloon_compaction.h
> @@ -48,6 +48,7 @@
> #include <linux/migrate.h>
> #include <linux/gfp.h>
> #include <linux/err.h>
> +#include <linux/fs.h>
>
> /*
> * Balloon device information descriptor.
> @@ -62,6 +63,7 @@ struct balloon_dev_info {
> struct list_head pages; /* Pages enqueued & handled to Host */
> int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
> struct page *page, enum migrate_mode mode);
> + struct address_space *mapping;
> };
>
> extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
> @@ -73,24 +75,37 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
> spin_lock_init(&balloon->pages_lock);
> INIT_LIST_HEAD(&balloon->pages);
> balloon->migratepage = NULL;
> + balloon->mapping = NULL;
> }
>
> #ifdef CONFIG_BALLOON_COMPACTION
> -extern bool balloon_page_isolate(struct page *page);
> +extern const struct address_space_operations balloon_aops;
> +extern bool balloon_page_isolate(struct page *page,
> + isolate_mode_t mode);
> extern void balloon_page_putback(struct page *page);
> -extern int balloon_page_migrate(struct page *newpage,
> +extern int balloon_page_migrate(struct address_space *mapping,
> + struct page *newpage,
> struct page *page, enum migrate_mode mode);
>
> +extern struct address_space
> +*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
> + const struct address_space_operations *a_ops);
> +
> +static inline void balloon_mapping_free(struct address_space *balloon_mapping)
> +{
> + kfree(balloon_mapping);
> +}
> +
> /*
> - * __is_movable_balloon_page - helper to perform @page PageBalloon tests
> + * __is_movable_balloon_page - helper to perform @page PageMigratable tests
> */
> static inline bool __is_movable_balloon_page(struct page *page)
> {
> - return PageBalloon(page);
> + return PageMigratable(page);
> }
>
> /*
> - * balloon_page_movable - test PageBalloon to identify balloon pages
> + * balloon_page_movable - test PageMigratable to identify balloon pages
> * and PagePrivate to check that the page is not
> * isolated and can be moved by compaction/migration.
> *
> @@ -99,7 +114,7 @@ static inline bool __is_movable_balloon_page(struct page *page)
> */
> static inline bool balloon_page_movable(struct page *page)
> {
> - return PageBalloon(page) && PagePrivate(page);
> + return PageMigratable(page) && PagePrivate(page);
> }
>
> /*
> @@ -108,7 +123,7 @@ static inline bool balloon_page_movable(struct page *page)
> */
> static inline bool isolated_balloon_page(struct page *page)
> {
> - return PageBalloon(page);
> + return PageMigratable(page);
> }
>
> /*
> @@ -123,7 +138,8 @@ static inline bool isolated_balloon_page(struct page *page)
> static inline void balloon_page_insert(struct balloon_dev_info *balloon,
> struct page *page)
> {
> - __SetPageBalloon(page);
> + page->mapping = balloon->mapping;
> + __SetPageMigratable(page);
> SetPagePrivate(page);
> set_page_private(page, (unsigned long)balloon);
> list_add(&page->lru, &balloon->pages);
> @@ -139,7 +155,8 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
> */
> static inline void balloon_page_delete(struct page *page)
> {
> - __ClearPageBalloon(page);
> + page->mapping = NULL;
> + __ClearPageMigratable(page);
> set_page_private(page, 0);
> if (PagePrivate(page)) {
> ClearPagePrivate(page);
> @@ -166,13 +183,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
> static inline void balloon_page_insert(struct balloon_dev_info *balloon,
> struct page *page)
> {
> - __SetPageBalloon(page);
> + __SetPageMigratable(page);
> list_add(&page->lru, &balloon->pages);
> }
>
> static inline void balloon_page_delete(struct page *page)
> {
> - __ClearPageBalloon(page);
> + __ClearPageMigratable(page);
> list_del(&page->lru);
> }
>
> @@ -191,7 +208,8 @@ static inline bool isolated_balloon_page(struct page *page)
> return false;
> }
>
> -static inline bool balloon_page_isolate(struct page *page)
> +static inline bool balloon_page_isolate(struct page *page,
> + isolate_mode_t mode)
> {
> return false;
> }
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 422c484..2d991a0 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -599,25 +599,6 @@ static inline void __ClearPageBuddy(struct page *page)
> atomic_set(&page->_mapcount, -1);
> }
>
> -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
> -
> -static inline int PageBalloon(struct page *page)
> -{
> - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
> -}
> -
> -static inline void __SetPageBalloon(struct page *page)
> -{
> - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
> - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
> -}
> -
> -static inline void __ClearPageBalloon(struct page *page)
> -{
> - VM_BUG_ON_PAGE(!PageBalloon(page), page);
> - atomic_set(&page->_mapcount, -1);
> -}
> -
Why are you killing this? This mark is exported to userspace.
> #define PAGE_MIGRATABLE_MAPCOUNT_VALUE (-256)
>
> static inline int PageMigratable(struct page *page)
> diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
> index a6c4962..a6a3c4b 100644
> --- a/include/uapi/linux/kernel-page-flags.h
> +++ b/include/uapi/linux/kernel-page-flags.h
> @@ -31,7 +31,7 @@
>
> #define KPF_KSM 21
> #define KPF_THP 22
> -#define KPF_BALLOON 23
> +#define KPF_MIGRATABLE 23
> #define KPF_ZERO_PAGE 24
>
>
> diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
> index fcad832..f98a500 100644
> --- a/mm/balloon_compaction.c
> +++ b/mm/balloon_compaction.c
> @@ -131,7 +131,7 @@ static inline void __putback_balloon_page(struct page *page)
> }
>
> /* __isolate_lru_page() counterpart for a ballooned page */
> -bool balloon_page_isolate(struct page *page)
> +bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
> {
> /*
> * Avoid burning cycles with pages that are yet under __free_pages(),
> @@ -175,6 +175,9 @@ bool balloon_page_isolate(struct page *page)
> /* putback_lru_page() counterpart for a ballooned page */
> void balloon_page_putback(struct page *page)
> {
> + if (!isolated_balloon_page(page))
> + return;
> +
> /*
> * 'lock_page()' stabilizes the page and prevents races against
> * concurrent isolation threads attempting to re-isolate it.
> @@ -193,12 +196,16 @@ void balloon_page_putback(struct page *page)
> }
>
> /* move_to_new_page() counterpart for a ballooned page */
> -int balloon_page_migrate(struct page *newpage,
> +int balloon_page_migrate(struct address_space *mapping,
> + struct page *newpage,
> struct page *page, enum migrate_mode mode)
> {
> struct balloon_dev_info *balloon = balloon_page_device(page);
> int rc = -EAGAIN;
>
> + if (!isolated_balloon_page(page))
> + return rc;
> +
> /*
> * Block others from accessing the 'newpage' when we get around to
> * establishing additional references. We should be the only one
> @@ -218,4 +225,54 @@ int balloon_page_migrate(struct page *newpage,
> unlock_page(newpage);
> return rc;
> }
> +
> +/* define the balloon_mapping->a_ops callback to allow balloon page migration */
> +const struct address_space_operations balloon_aops = {
> + .migratepage = balloon_page_migrate,
> + .isolatepage = balloon_page_isolate,
> + .putbackpage = balloon_page_putback,
> +};
> +EXPORT_SYMBOL_GPL(balloon_aops);
> +
> +struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
> + const struct address_space_operations *a_ops)
> +{
> + struct address_space *mapping;
> +
> + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
> + if (!mapping)
> + return ERR_PTR(-ENOMEM);
> +
> + /*
> + * Give a clean 'zeroed' status to all elements of this special
> + * balloon page->mapping struct address_space instance.
> + */
> + address_space_init_once(mapping);
> +
> + /*
> + * Set mapping->flags appropriately, to allow balloon pages
> + * ->mapping identification.
> + */
> + mapping_set_migratable(mapping);
> + mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());
> +
> + /* balloon's page->mapping->a_ops callback descriptor */
> + mapping->a_ops = a_ops;
> +
> + /*
> + * Establish a pointer reference back to the balloon device descriptor
> + * this particular page->mapping will be servicing.
> + * This is used by compaction / migration procedures to identify and
> + * access the balloon device pageset while isolating / migrating pages.
> + *
> + * As some balloon drivers can register multiple balloon devices
> + * for a single guest, this also helps compaction / migration to
> + * properly deal with multiple balloon pagesets, when required.
> + */
> + mapping->private_data = b_dev_info;
> + b_dev_info->mapping = mapping;
> +
> + return mapping;
> +}
> +EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
So, you're reverting my changes and returning this mess.
I don't mind -- zram/balloon might have a special mapping, but at least please
create it in an appropriate way: together with a valid inode and superblock.
I think it's ok to use anon-inodes (fs/anon_inodes.c) for that.
For now anon_inodefs has only one inode, and I see no reason why it
cannot keep more.
Probably aio/drm could use it too instead of mounting its own pseudo
filesystem.
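Very roughly, I mean something like the following; anon_inode_new() here is
hypothetical -- anon_inodefs would first need to hand out more than its single
shared inode:

    /* sketch only; anon_inode_new() is a hypothetical helper, not an existing API */
    struct inode *inode = anon_inode_new();

    if (IS_ERR(inode))
            return ERR_CAST(inode);
    inode->i_mapping->a_ops = &balloon_aops;
    mapping_set_migratable(inode->i_mapping);
    inode->i_mapping->private_data = b_dev_info;
    b_dev_info->mapping = inode->i_mapping;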
> #endif /* CONFIG_BALLOON_COMPACTION */
> --
> 1.9.1
>
> On Tue, Jun 2, 2015 at 10:27 AM, Gioh Kim <[email protected]> wrote:
>> Apply driver page migration into balloon driver.
>>
>> Signed-off-by: Gioh Kim <[email protected]>
>> ---
>> drivers/virtio/virtio_balloon.c | 2 ++
>> fs/proc/page.c | 4 +--
>> include/linux/balloon_compaction.h | 42 ++++++++++++++++-------
>> include/linux/mm.h | 19 -----------
>> include/uapi/linux/kernel-page-flags.h | 2 +-
>> mm/balloon_compaction.c | 61 ++++++++++++++++++++++++++++++++--
>> 6 files changed, 94 insertions(+), 36 deletions(-)
>>
>> diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
>> index 6a356e3..cdd0038 100644
>> --- a/drivers/virtio/virtio_balloon.c
>> +++ b/drivers/virtio/virtio_balloon.c
>> @@ -496,6 +496,8 @@ static int virtballoon_probe(struct virtio_device *vdev)
>> balloon_devinfo_init(&vb->vb_dev_info);
>> #ifdef CONFIG_BALLOON_COMPACTION
>> vb->vb_dev_info.migratepage = virtballoon_migratepage;
>> + vb->vb_dev_info.mapping = balloon_mapping_alloc(&vb->vb_dev_info,
>> + &balloon_aops);
>> #endif
>>
>> err = init_vqs(vb);
>> diff --git a/fs/proc/page.c b/fs/proc/page.c
>> index 7eee2d8..e741307 100644
>> --- a/fs/proc/page.c
>> +++ b/fs/proc/page.c
>> @@ -143,8 +143,8 @@ u64 stable_page_flags(struct page *page)
>> if (PageBuddy(page))
>> u |= 1 << KPF_BUDDY;
>>
>> - if (PageBalloon(page))
>> - u |= 1 << KPF_BALLOON;
>> + if (PageMigratable(page))
>> + u |= 1 << KPF_MIGRATABLE;
>>
>> u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
>>
>> diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
>> index 9b0a15d..0989e96 100644
>> --- a/include/linux/balloon_compaction.h
>> +++ b/include/linux/balloon_compaction.h
>> @@ -48,6 +48,7 @@
>> #include <linux/migrate.h>
>> #include <linux/gfp.h>
>> #include <linux/err.h>
>> +#include <linux/fs.h>
>>
>> /*
>> * Balloon device information descriptor.
>> @@ -62,6 +63,7 @@ struct balloon_dev_info {
>> struct list_head pages; /* Pages enqueued & handled to Host */
>> int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
>> struct page *page, enum migrate_mode mode);
>> + struct address_space *mapping;
>> };
>>
>> extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
>> @@ -73,24 +75,37 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
>> spin_lock_init(&balloon->pages_lock);
>> INIT_LIST_HEAD(&balloon->pages);
>> balloon->migratepage = NULL;
>> + balloon->mapping = NULL;
>> }
>>
>> #ifdef CONFIG_BALLOON_COMPACTION
>> -extern bool balloon_page_isolate(struct page *page);
>> +extern const struct address_space_operations balloon_aops;
>> +extern bool balloon_page_isolate(struct page *page,
>> + isolate_mode_t mode);
>> extern void balloon_page_putback(struct page *page);
>> -extern int balloon_page_migrate(struct page *newpage,
>> +extern int balloon_page_migrate(struct address_space *mapping,
>> + struct page *newpage,
>> struct page *page, enum migrate_mode mode);
>>
>> +extern struct address_space
>> +*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
>> + const struct address_space_operations *a_ops);
>> +
>> +static inline void balloon_mapping_free(struct address_space *balloon_mapping)
>> +{
>> + kfree(balloon_mapping);
>> +}
>> +
>> /*
>> - * __is_movable_balloon_page - helper to perform @page PageBalloon tests
>> + * __is_movable_balloon_page - helper to perform @page PageMigratable tests
>> */
>> static inline bool __is_movable_balloon_page(struct page *page)
>> {
>> - return PageBalloon(page);
>> + return PageMigratable(page);
>> }
>>
>> /*
>> - * balloon_page_movable - test PageBalloon to identify balloon pages
>> + * balloon_page_movable - test PageMigratable to identify balloon pages
>> * and PagePrivate to check that the page is not
>> * isolated and can be moved by compaction/migration.
>> *
>> @@ -99,7 +114,7 @@ static inline bool __is_movable_balloon_page(struct page *page)
>> */
>> static inline bool balloon_page_movable(struct page *page)
>> {
>> - return PageBalloon(page) && PagePrivate(page);
>> + return PageMigratable(page) && PagePrivate(page);
>> }
>>
>> /*
>> @@ -108,7 +123,7 @@ static inline bool balloon_page_movable(struct page *page)
>> */
>> static inline bool isolated_balloon_page(struct page *page)
>> {
>> - return PageBalloon(page);
>> + return PageMigratable(page);
>> }
>>
>> /*
>> @@ -123,7 +138,8 @@ static inline bool isolated_balloon_page(struct page *page)
>> static inline void balloon_page_insert(struct balloon_dev_info *balloon,
>> struct page *page)
>> {
>> - __SetPageBalloon(page);
>> + page->mapping = balloon->mapping;
>> + __SetPageMigratable(page);
>> SetPagePrivate(page);
>> set_page_private(page, (unsigned long)balloon);
>> list_add(&page->lru, &balloon->pages);
>> @@ -139,7 +155,8 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
>> */
>> static inline void balloon_page_delete(struct page *page)
>> {
>> - __ClearPageBalloon(page);
>> + page->mapping = NULL;
>> + __ClearPageMigratable(page);
>> set_page_private(page, 0);
>> if (PagePrivate(page)) {
>> ClearPagePrivate(page);
>> @@ -166,13 +183,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
>> static inline void balloon_page_insert(struct balloon_dev_info *balloon,
>> struct page *page)
>> {
>> - __SetPageBalloon(page);
>> + __SetPageMigratable(page);
>> list_add(&page->lru, &balloon->pages);
>> }
>>
>> static inline void balloon_page_delete(struct page *page)
>> {
>> - __ClearPageBalloon(page);
>> + __ClearPageMigratable(page);
>> list_del(&page->lru);
>> }
>>
>> @@ -191,7 +208,8 @@ static inline bool isolated_balloon_page(struct page *page)
>> return false;
>> }
>>
>> -static inline bool balloon_page_isolate(struct page *page)
>> +static inline bool balloon_page_isolate(struct page *page,
>> + isolate_mode_t mode)
>> {
>> return false;
>> }
>> diff --git a/include/linux/mm.h b/include/linux/mm.h
>> index 422c484..2d991a0 100644
>> --- a/include/linux/mm.h
>> +++ b/include/linux/mm.h
>> @@ -599,25 +599,6 @@ static inline void __ClearPageBuddy(struct page *page)
>> atomic_set(&page->_mapcount, -1);
>> }
>>
>> -#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
>> -
>> -static inline int PageBalloon(struct page *page)
>> -{
>> - return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
>> -}
>> -
>> -static inline void __SetPageBalloon(struct page *page)
>> -{
>> - VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
>> - atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
>> -}
>> -
>> -static inline void __ClearPageBalloon(struct page *page)
>> -{
>> - VM_BUG_ON_PAGE(!PageBalloon(page), page);
>> - atomic_set(&page->_mapcount, -1);
>> -}
>> -
>
> Why are you killing this? This mark is exported to userspace.
>
>> #define PAGE_MIGRATABLE_MAPCOUNT_VALUE (-256)
>>
>> static inline int PageMigratable(struct page *page)
>> diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
>> index a6c4962..a6a3c4b 100644
>> --- a/include/uapi/linux/kernel-page-flags.h
>> +++ b/include/uapi/linux/kernel-page-flags.h
>> @@ -31,7 +31,7 @@
>>
>> #define KPF_KSM 21
>> #define KPF_THP 22
>> -#define KPF_BALLOON 23
>> +#define KPF_MIGRATABLE 23
>> #define KPF_ZERO_PAGE 24
>>
>>
>> diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
>> index fcad832..f98a500 100644
>> --- a/mm/balloon_compaction.c
>> +++ b/mm/balloon_compaction.c
>> @@ -131,7 +131,7 @@ static inline void __putback_balloon_page(struct page *page)
>> }
>>
>> /* __isolate_lru_page() counterpart for a ballooned page */
>> -bool balloon_page_isolate(struct page *page)
>> +bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
>> {
>> /*
>> * Avoid burning cycles with pages that are yet under __free_pages(),
>> @@ -175,6 +175,9 @@ bool balloon_page_isolate(struct page *page)
>> /* putback_lru_page() counterpart for a ballooned page */
>> void balloon_page_putback(struct page *page)
>> {
>> + if (!isolated_balloon_page(page))
>> + return;
>> +
>> /*
>> * 'lock_page()' stabilizes the page and prevents races against
>> * concurrent isolation threads attempting to re-isolate it.
>> @@ -193,12 +196,16 @@ void balloon_page_putback(struct page *page)
>> }
>>
>> /* move_to_new_page() counterpart for a ballooned page */
>> -int balloon_page_migrate(struct page *newpage,
>> +int balloon_page_migrate(struct address_space *mapping,
>> + struct page *newpage,
>> struct page *page, enum migrate_mode mode)
>> {
>> struct balloon_dev_info *balloon = balloon_page_device(page);
>> int rc = -EAGAIN;
>>
>> + if (!isolated_balloon_page(page))
>> + return rc;
>> +
>> /*
>> * Block others from accessing the 'newpage' when we get around to
>> * establishing additional references. We should be the only one
>> @@ -218,4 +225,54 @@ int balloon_page_migrate(struct page *newpage,
>> unlock_page(newpage);
>> return rc;
>> }
>> +
>> +/* define the balloon_mapping->a_ops callback to allow balloon page migration */
>> +const struct address_space_operations balloon_aops = {
>> + .migratepage = balloon_page_migrate,
>> + .isolatepage = balloon_page_isolate,
>> + .putbackpage = balloon_page_putback,
>> +};
>> +EXPORT_SYMBOL_GPL(balloon_aops);
>> +
>> +struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
>> + const struct address_space_operations *a_ops)
>> +{
>> + struct address_space *mapping;
>> +
>> + mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
>> + if (!mapping)
>> + return ERR_PTR(-ENOMEM);
>> +
>> + /*
>> + * Give a clean 'zeroed' status to all elements of this special
>> + * balloon page->mapping struct address_space instance.
>> + */
>> + address_space_init_once(mapping);
>> +
>> + /*
>> + * Set mapping->flags appropriately, to allow balloon pages
>> + * ->mapping identification.
>> + */
>> + mapping_set_migratable(mapping);
>> + mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());
>> +
>> + /* balloon's page->mapping->a_ops callback descriptor */
>> + mapping->a_ops = a_ops;
>> +
>> + /*
>> + * Establish a pointer reference back to the balloon device descriptor
>> + * this particular page->mapping will be servicing.
>> + * This is used by compaction / migration procedures to identify and
>> + * access the balloon device pageset while isolating / migrating pages.
>> + *
>> + * As some balloon drivers can register multiple balloon devices
>> + * for a single guest, this also helps compaction / migration to
>> + * properly deal with multiple balloon pagesets, when required.
>> + */
>> + mapping->private_data = b_dev_info;
>> + b_dev_info->mapping = mapping;
>> +
>> + return mapping;
>> +}
>> +EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
>
> So, you're reverting my changes and returning this mess.
> I don't mind -- zram/balloon might have a special mapping, but at least please
> create it in an appropriate way: together with a valid inode and superblock.
I think it's not bad.
Anyway, my point is that the generic callbacks are needed to migrate non-LRU pages,
as described in patch 1/4.
Patches 2~4 are an example to show how to use the generic callbacks.
>
> I think it's ok to use anon-inodes (fs/anon_inodes.c) for that.
> For now anon_inodefs has only one inode, and I see no reason why it
> cannot keep more.
> Probably aio/drm could use it too instead of mounting its own pseudo
> filesystem.
Thank you for your feedback.
>
>> #endif /* CONFIG_BALLOON_COMPACTION */
>> --
>> 1.9.1
>>
On Tue, Jun 02, 2015 at 04:27:40PM +0900, Gioh Kim wrote:
> Hello,
>
> This series tries to enable migration of non-LRU pages, such as driver pages.
>
> My ARM-based platform suffered a severe fragmentation problem after long-term
> (several days) tests. Sometimes even order-3 page allocations failed. The
> platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
> processing and 20% ~ 30% is reserved for zram.
>
> I found that many pages of the GPU driver and zram are non-movable pages. So I
> reported it to Minchan Kim, the maintainer of zram, and he implemented the
> internal compaction logic of zram. I implemented the internal compaction of the
> GPU driver.
>
> They reduced some fragmentation but they are not effective enough. They are
> activated via their own /sys interfaces, so they do not cooperate with kernel
> compaction: when there is too much fragmentation and the kernel starts
> compaction, zram and the GPU driver cannot take part in it.
>
> First, this patch set adds generic isolate/migrate/putback callbacks to the
> page's address_space. zram, the GPU driver, and any other module can register
> their own migration methods, and kernel compaction can call the registered
> callbacks while it runs. Therefore all pages in the system can be migrated at
> once.
>
> Second, the generic migration callbacks are applied to the balloon driver.
> My GPU driver code is not open, so I apply the generic migration to the balloon
> driver to show how it works. I've tested it with kvm-enabled qemu as follows:
> - boot Ubuntu 14.04 with 1GB of memory on qemu
> - start a kernel build
> - after several seconds, check with the free command that more than 512MB is used
> - run "balloon 512" in the qemu monitor
> - check that hundreds of MB of pages are migrated
>
> Next, the kernel compaction code is changed to call the generic migration
> callbacks instead of the balloon driver interface.
> Finally, the direct call into the balloon driver's migration is removed.
>
At a glance, as Konstantin pointed out, this set, while it twists chunks around,
brings back code we got rid of a while ago because it was messy and racy.
I'll take a closer look at your work next week, but for now, I'd say
we should not follow this path of reintroducing long-dead code.
Cheers!
-- Rafael
>
> Gioh Kim (4):
> mm/compaction: enable driver page migration
> mm/balloon: apply migratable-page into balloon driver
> mm/compaction: apply migratable-page into compaction
> mm: remove direct migration of migratable-page
>
> drivers/virtio/virtio_balloon.c | 2 +
> fs/proc/page.c | 4 +-
> include/linux/balloon_compaction.h | 42 +++++++++++++++------
> include/linux/compaction.h | 13 +++++++
> include/linux/fs.h | 2 +
> include/linux/mm.h | 14 +++----
> include/linux/pagemap.h | 27 ++++++++++++++
> include/uapi/linux/kernel-page-flags.h | 2 +-
> mm/balloon_compaction.c | 67 +++++++++++++++++++++++++++++-----
> mm/compaction.c | 9 +++--
> mm/migrate.c | 25 ++++---------
> 11 files changed, 154 insertions(+), 53 deletions(-)
>
> --
> 1.9.1
>
> On Tue, Jun 02, 2015 at 04:27:40PM +0900, Gioh Kim wrote:
>> Hello,
>>
>> This series tries to enable migration of non-LRU pages, such as driver pages.
>>
>> My ARM-based platform suffered a severe fragmentation problem after long-term
>> (several days) tests. Sometimes even order-3 page allocations failed. The
>> platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
>> processing and 20% ~ 30% is reserved for zram.
>>
>> I found that many pages of the GPU driver and zram are non-movable pages. So I
>> reported it to Minchan Kim, the maintainer of zram, and he implemented the
>> internal compaction logic of zram. I implemented the internal compaction of the
>> GPU driver.
>>
>> They reduced some fragmentation but they are not effective enough. They are
>> activated via their own /sys interfaces, so they do not cooperate with kernel
>> compaction: when there is too much fragmentation and the kernel starts
>> compaction, zram and the GPU driver cannot take part in it.
>>
>> First, this patch set adds generic isolate/migrate/putback callbacks to the
>> page's address_space. zram, the GPU driver, and any other module can register
>> their own migration methods, and kernel compaction can call the registered
>> callbacks while it runs. Therefore all pages in the system can be migrated at
>> once.
>>
>> Second, the generic migration callbacks are applied to the balloon driver.
>> My GPU driver code is not open, so I apply the generic migration to the balloon
>> driver to show how it works. I've tested it with kvm-enabled qemu as follows:
>> - boot Ubuntu 14.04 with 1GB of memory on qemu
>> - start a kernel build
>> - after several seconds, check with the free command that more than 512MB is used
>> - run "balloon 512" in the qemu monitor
>> - check that hundreds of MB of pages are migrated
>>
>> Next, the kernel compaction code is changed to call the generic migration
>> callbacks instead of the balloon driver interface.
>> Finally, the direct call into the balloon driver's migration is removed.
>>
>
> At a glance, as Konstantin pointed out, this set, while it twists chunks around,
> brings back code we got rid of a while ago because it was messy and racy.
Yes, your point is right.
> I'll take a closer look at your work next week, but for now, I'd say
> we should not follow this path of reintroducing long-dead code.
BUT as I replied to Konstantin, the code for the balloon driver is there to show
how the generic callbacks can be applied.
My point is that there are some pages which should be migrated but are not LRU
pages, and there is no interface to migrate them.
For example, the GPU driver has many pages that are not mapped into kernel space.
Those pages can be migrated when the GPU is not working and the screen is not
being refreshed. And zram pages can be migrated as well.
I'm very sorry that I'm not familiar with the balloon driver.
If you give me some hints, I might be able to refine the code of patches 2~3 in the next spin.
Thank you for the feedback.
>
> Cheers!
> -- Rafael
>
>>
>> Gioh Kim (4):
>> mm/compaction: enable driver page migration
>> mm/balloon: apply migratable-page into balloon driver
>> mm/compaction: apply migratable-page into compaction
>> mm: remove direct migration of migratable-page
>>
>> drivers/virtio/virtio_balloon.c | 2 +
>> fs/proc/page.c | 4 +-
>> include/linux/balloon_compaction.h | 42 +++++++++++++++------
>> include/linux/compaction.h | 13 +++++++
>> include/linux/fs.h | 2 +
>> include/linux/mm.h | 14 +++----
>> include/linux/pagemap.h | 27 ++++++++++++++
>> include/uapi/linux/kernel-page-flags.h | 2 +-
>> mm/balloon_compaction.c | 67 +++++++++++++++++++++++++++++-----
>> mm/compaction.c | 9 +++--
>> mm/migrate.c | 25 ++++---------
>> 11 files changed, 154 insertions(+), 53 deletions(-)
>>
>> --
>> 1.9.1
>>
On Fri, Jun 05, 2015 at 11:35:49PM +0900, Gioh Kim wrote:
> >On Tue, Jun 02, 2015 at 04:27:40PM +0900, Gioh Kim wrote:
> >>Hello,
> >>
> >>This series tries to enable migration of non-LRU pages, such as driver pages.
> >>
> >>My ARM-based platform suffered a severe fragmentation problem after long-term
> >>(several days) tests. Sometimes even order-3 page allocations failed. The
> >>platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
> >>processing and 20% ~ 30% is reserved for zram.
> >>
> >>I found that many pages of the GPU driver and zram are non-movable pages. So I
> >>reported it to Minchan Kim, the maintainer of zram, and he implemented the
> >>internal compaction logic of zram. I implemented the internal compaction of the
> >>GPU driver.
> >>
> >>They reduced some fragmentation but they are not effective enough. They are
> >>activated via their own /sys interfaces, so they do not cooperate with kernel
> >>compaction: when there is too much fragmentation and the kernel starts
> >>compaction, zram and the GPU driver cannot take part in it.
> >>
> >>First, this patch set adds generic isolate/migrate/putback callbacks to the
> >>page's address_space. zram, the GPU driver, and any other module can register
> >>their own migration methods, and kernel compaction can call the registered
> >>callbacks while it runs. Therefore all pages in the system can be migrated at
> >>once.
> >>
> >>Second, the generic migration callbacks are applied to the balloon driver.
> >>My GPU driver code is not open, so I apply the generic migration to the balloon
> >>driver to show how it works. I've tested it with kvm-enabled qemu as follows:
> >>- boot Ubuntu 14.04 with 1GB of memory on qemu
> >>- start a kernel build
> >>- after several seconds, check with the free command that more than 512MB is used
> >>- run "balloon 512" in the qemu monitor
> >>- check that hundreds of MB of pages are migrated
> >>
> >>Next, the kernel compaction code is changed to call the generic migration
> >>callbacks instead of the balloon driver interface.
> >>Finally, the direct call into the balloon driver's migration is removed.
> >>
> >
> >At a glance, as Konstantin pointed out, this set, while it twists chunks around,
> >brings back code we got rid of a while ago because it was messy and racy.
>
> Yes, your point is right.
>
> >I'll take a closer look at your work next week, but for now, I'd say
> >we should not follow this path of reintroducing long-dead code.
>
> BUT as I replied to Konstantin, the code for the balloon driver is there to show
> how the generic callbacks can be applied.
>
> My point is that there are some pages which should be migrated but are not LRU
> pages, and there is no interface to migrate them.
>
> For example, the GPU driver has many pages that are not mapped into kernel space.
> Those pages can be migrated when the GPU is not working and the screen is not
> being refreshed. And zram pages can be migrated as well.
>
> I'm very sorry that I'm not familiar with the balloon driver.
> If you give me some hints, I might be able to refine the code of patches 2~3 in the next spin.
>
NP at all. Thanks for bringing this discussion of migrating non-LRU
pages back to the table, actually. As I mentioned earlier, I'll take a
closer look at your work as soon as I get time next week, so I can try
to help with more qualified feedback.
Have a nice weekend!
-- Rafael
Hello Gioh,
On Tue, Jun 02, 2015 at 04:27:40PM +0900, Gioh Kim wrote:
> Hello,
>
> This series tries to enable migration of non-LRU pages, such as driver pages.
>
> My ARM-based platform suffered a severe fragmentation problem after long-term
> (several days) tests. Sometimes even order-3 page allocations failed. The
> platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
> processing and 20% ~ 30% is reserved for zram.
>
> I found that many pages of the GPU driver and zram are non-movable pages. So I
> reported it to Minchan Kim, the maintainer of zram, and he implemented the
> internal compaction logic of zram. I implemented the internal compaction of the
> GPU driver.
>
> They reduced some fragmentation but they are not effective enough. They are
> activated via their own /sys interfaces, so they do not cooperate with kernel
> compaction: when there is too much fragmentation and the kernel starts
> compaction, zram and the GPU driver cannot take part in it.
>
> First, this patch set adds generic isolate/migrate/putback callbacks to the
> page's address_space. zram, the GPU driver, and any other module can register
> their own migration methods, and kernel compaction can call the registered
> callbacks while it runs. Therefore all pages in the system can be migrated at
> once.
>
> Second, the generic migration callbacks are applied to the balloon driver.
> My GPU driver code is not open, so I apply the generic migration to the balloon
> driver to show how it works. I've tested it with kvm-enabled qemu as follows:
> - boot Ubuntu 14.04 with 1GB of memory on qemu
> - start a kernel build
> - after several seconds, check with the free command that more than 512MB is used
> - run "balloon 512" in the qemu monitor
> - check that hundreds of MB of pages are migrated
>
> Next, the kernel compaction code is changed to call the generic migration
> callbacks instead of the balloon driver interface.
> Finally, the direct call into the balloon driver's migration is removed.
I didn't have time to review it yet, but it surely will help with using zram with
CMA, and will also reduce fragmentation of system memory by making zram objects
movable.
If it lands on mainline, I will work on zram object migration.
Thanks!
On 2015-06-10 9:08 AM, Minchan Kim wrote:
> Hello Gioh,
>
> On Tue, Jun 02, 2015 at 04:27:40PM +0900, Gioh Kim wrote:
>> Hello,
>>
>> This series tries to enable migration of non-LRU pages, such as driver pages.
>>
>> My ARM-based platform suffered a severe fragmentation problem after long-term
>> (several days) tests. Sometimes even order-3 page allocations failed. The
>> platform has 512MB ~ 1024MB of memory; 30% ~ 40% of it is consumed for graphics
>> processing and 20% ~ 30% is reserved for zram.
>>
>> I found that many pages of the GPU driver and zram are non-movable pages. So I
>> reported it to Minchan Kim, the maintainer of zram, and he implemented the
>> internal compaction logic of zram. I implemented the internal compaction of the
>> GPU driver.
>>
>> They reduced some fragmentation but they are not effective enough. They are
>> activated via their own /sys interfaces, so they do not cooperate with kernel
>> compaction: when there is too much fragmentation and the kernel starts
>> compaction, zram and the GPU driver cannot take part in it.
>>
>> First, this patch set adds generic isolate/migrate/putback callbacks to the
>> page's address_space. zram, the GPU driver, and any other module can register
>> their own migration methods, and kernel compaction can call the registered
>> callbacks while it runs. Therefore all pages in the system can be migrated at
>> once.
>>
>> Second, the generic migration callbacks are applied to the balloon driver.
>> My GPU driver code is not open, so I apply the generic migration to the balloon
>> driver to show how it works. I've tested it with kvm-enabled qemu as follows:
>> - boot Ubuntu 14.04 with 1GB of memory on qemu
>> - start a kernel build
>> - after several seconds, check with the free command that more than 512MB is used
>> - run "balloon 512" in the qemu monitor
>> - check that hundreds of MB of pages are migrated
>>
>> Next, the kernel compaction code is changed to call the generic migration
>> callbacks instead of the balloon driver interface.
>> Finally, the direct call into the balloon driver's migration is removed.
>
> I didn't have time to review it yet, but it surely will help with using zram with
> CMA, and will also reduce fragmentation of system memory by making zram objects
> movable.
I know you are busy. I hope you can make time for a review.
>
> If it lands on mainline, I will work on zram object migration.
>
> Thanks!
>