2019-06-05 10:09:00

by Hui Zhu

Subject: [PATCH V3 1/2] zpool: Add malloc_support_movable to zpool_driver

As a zpool_driver, zsmalloc can allocate movable memory because it
supports page migration, but zbud and z3fold cannot allocate movable
memory.

This commit adds malloc_support_movable to zpool_driver. A
zpool_driver that supports allocating movable memory sets it to true.
It also adds zpool_malloc_support_movable(), which checks
malloc_support_movable to report whether a zpool supports allocating
movable memory.
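
For reference, a caller is expected to use the new helper roughly as
follows. This is only a sketch with placeholder variable names; patch
2/2 in this series applies exactly this pattern in zswap:

	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

	/* Request movable highmem pages only if the backing driver
	 * declared malloc_support_movable.
	 */
	if (zpool_malloc_support_movable(zpool))
		gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;

	ret = zpool_malloc(zpool, size, gfp, &handle);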

Signed-off-by: Hui Zhu <[email protected]>
---
include/linux/zpool.h | 3 +++
mm/zpool.c | 16 ++++++++++++++++
mm/zsmalloc.c | 19 ++++++++++---------
3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index 7238865e75b0..51bf43076165 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -46,6 +46,8 @@ const char *zpool_get_type(struct zpool *pool);

void zpool_destroy_pool(struct zpool *pool);

+bool zpool_malloc_support_movable(struct zpool *pool);
+
int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
unsigned long *handle);

@@ -90,6 +92,7 @@ struct zpool_driver {
struct zpool *zpool);
void (*destroy)(void *pool);

+ bool malloc_support_movable;
int (*malloc)(void *pool, size_t size, gfp_t gfp,
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);
diff --git a/mm/zpool.c b/mm/zpool.c
index a2dd9107857d..863669212070 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -238,6 +238,22 @@ const char *zpool_get_type(struct zpool *zpool)
return zpool->driver->type;
}

+/**
+ * zpool_malloc_support_movable() - Check if the zpool supports
+ * allocating movable memory
+ * @zpool: The zpool to check
+ *
+ * This returns whether the zpool supports allocating movable memory.
+ *
+ * Implementations must guarantee this to be thread-safe.
+ *
+ * Returns: true if the zpool supports allocating movable memory, false if not
+ */
+bool zpool_malloc_support_movable(struct zpool *zpool)
+{
+ return zpool->driver->malloc_support_movable;
+}
+
/**
* zpool_malloc() - Allocate memory
* @zpool: The zpool to allocate from.
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 0787d33b80d8..8f3d9a4d46f4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -437,15 +437,16 @@ static u64 zs_zpool_total_size(void *pool)
}

static struct zpool_driver zs_zpool_driver = {
- .type = "zsmalloc",
- .owner = THIS_MODULE,
- .create = zs_zpool_create,
- .destroy = zs_zpool_destroy,
- .malloc = zs_zpool_malloc,
- .free = zs_zpool_free,
- .map = zs_zpool_map,
- .unmap = zs_zpool_unmap,
- .total_size = zs_zpool_total_size,
+ .type = "zsmalloc",
+ .owner = THIS_MODULE,
+ .create = zs_zpool_create,
+ .destroy = zs_zpool_destroy,
+ .malloc_support_movable = true,
+ .malloc = zs_zpool_malloc,
+ .free = zs_zpool_free,
+ .map = zs_zpool_map,
+ .unmap = zs_zpool_unmap,
+ .total_size = zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
--
2.21.0 (Apple Git-120)


2019-06-05 10:09:27

by Hui Zhu

Subject: [PATCH V3 2/2] zswap: Use movable memory if zpool support allocate movable memory

This is the third version, updated according to the comments from
Sergey Senozhatsky https://lkml.org/lkml/2019/5/29/73 and
Shakeel Butt https://lkml.org/lkml/2019/6/4/973

zswap compresses swap pages into a dynamically allocated RAM-based
memory pool. The memory pool can be zbud, z3fold or zsmalloc. All of
them currently allocate unmovable pages, which increases the number
of unmovable page blocks and hurts anti-fragmentation.

zsmalloc supports page migration when movable pages are requested, as
zram already does:
	handle = zs_malloc(zram->mem_pool, comp_len,
			   GFP_NOIO | __GFP_HIGHMEM |
			   __GFP_MOVABLE);

The previous commit, "zpool: Add malloc_support_movable to
zpool_driver", added zpool_malloc_support_movable(), which checks
malloc_support_movable to report whether a zpool supports allocating
movable memory.

This commit lets zswap add __GFP_HIGHMEM | __GFP_MOVABLE to the gfp
flags of the allocation if the zpool supports allocating movable memory.

The following is a test log from a PC with 8G of memory and 2G of swap.

Without this commit:
~# echo lz4 > /sys/module/zswap/parameters/compressor
~# echo zsmalloc > /sys/module/zswap/parameters/zpool
~# echo 1 > /sys/module/zswap/parameters/enabled
~# swapon /swapfile
~# cd /home/teawater/kernel/vm-scalability/
/home/teawater/kernel/vm-scalability# export unit_size=$((9 * 1024 * 1024 * 1024))
/home/teawater/kernel/vm-scalability# ./case-anon-w-seq
2717908992 bytes / 4826062 usecs = 549973 KB/s
2717908992 bytes / 4864201 usecs = 545661 KB/s
2717908992 bytes / 4867015 usecs = 545346 KB/s
2717908992 bytes / 4915485 usecs = 539968 KB/s
397853 usecs to free memory
357820 usecs to free memory
421333 usecs to free memory
420454 usecs to free memory
/home/teawater/kernel/vm-scalability# cat /proc/pagetypeinfo
Page block order: 9
Pages per block: 512

Free pages count per migrate type at order 0 1 2 3 4 5 6 7 8 9 10
Node 0, zone DMA, type Unmovable 1 1 1 0 2 1 1 0 1 0 0
Node 0, zone DMA, type Movable 0 0 0 0 0 0 0 0 0 1 3
Node 0, zone DMA, type Reclaimable 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type Isolate 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type Unmovable 6 5 8 6 6 5 4 1 1 1 0
Node 0, zone DMA32, type Movable 25 20 20 19 22 15 14 11 11 5 767
Node 0, zone DMA32, type Reclaimable 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type Isolate 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type Unmovable 4753 5588 5159 4613 3712 2520 1448 594 188 11 0
Node 0, zone Normal, type Movable 16 3 457 2648 2143 1435 860 459 223 224 296
Node 0, zone Normal, type Reclaimable 0 0 44 38 11 2 0 0 0 0 0
Node 0, zone Normal, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type Isolate 0 0 0 0 0 0 0 0 0 0 0

Number of blocks type Unmovable Movable Reclaimable HighAtomic CMA Isolate
Node 0, zone DMA 1 7 0 0 0 0
Node 0, zone DMA32 4 1652 0 0 0 0
Node 0, zone Normal 931 1485 15 0 0 0

With this commit:
~# echo lz4 > /sys/module/zswap/parameters/compressor
~# echo zsmalloc > /sys/module/zswap/parameters/zpool
~# echo 1 > /sys/module/zswap/parameters/enabled
~# swapon /swapfile
~# cd /home/teawater/kernel/vm-scalability/
/home/teawater/kernel/vm-scalability# export unit_size=$((9 * 1024 * 1024 * 1024))
/home/teawater/kernel/vm-scalability# ./case-anon-w-seq
2717908992 bytes / 4689240 usecs = 566020 KB/s
2717908992 bytes / 4760605 usecs = 557535 KB/s
2717908992 bytes / 4803621 usecs = 552543 KB/s
2717908992 bytes / 5069828 usecs = 523530 KB/s
431546 usecs to free memory
383397 usecs to free memory
456454 usecs to free memory
224487 usecs to free memory
/home/teawater/kernel/vm-scalability# cat /proc/pagetypeinfo
Page block order: 9
Pages per block: 512

Free pages count per migrate type at order 0 1 2 3 4 5 6 7 8 9 10
Node 0, zone DMA, type Unmovable 1 1 1 0 2 1 1 0 1 0 0
Node 0, zone DMA, type Movable 0 0 0 0 0 0 0 0 0 1 3
Node 0, zone DMA, type Reclaimable 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA, type Isolate 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type Unmovable 10 8 10 9 10 4 3 2 3 0 0
Node 0, zone DMA32, type Movable 18 12 14 16 16 11 9 5 5 6 775
Node 0, zone DMA32, type Reclaimable 0 0 0 0 0 0 0 0 0 0 1
Node 0, zone DMA32, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone DMA32, type Isolate 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type Unmovable 2669 1236 452 118 37 14 4 1 2 3 0
Node 0, zone Normal, type Movable 3850 6086 5274 4327 3510 2494 1520 934 438 220 470
Node 0, zone Normal, type Reclaimable 56 93 155 124 47 31 17 7 3 0 0
Node 0, zone Normal, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type CMA 0 0 0 0 0 0 0 0 0 0 0
Node 0, zone Normal, type Isolate 0 0 0 0 0 0 0 0 0 0 0

Number of blocks type Unmovable Movable Reclaimable HighAtomic CMA Isolate
Node 0, zone DMA 1 7 0 0 0 0
Node 0, zone DMA32 4 1650 2 0 0 0
Node 0, zone Normal 79 2326 26 0 0 0

You can see that the number of unmovable page blocks decreases when
the kernel has this commit.

Signed-off-by: Hui Zhu <[email protected]>
---
mm/zswap.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index a4e4d36ec085..c6bf92bf5890 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1006,6 +1006,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
char *buf;
u8 *src, *dst;
struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
+ gfp_t gfp;

/* THP isn't supported */
if (PageTransHuge(page)) {
@@ -1079,9 +1080,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,

/* store */
hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
- ret = zpool_malloc(entry->pool->zpool, hlen + dlen,
- __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM,
- &handle);
+ gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+ if (zpool_malloc_support_movable(entry->pool->zpool))
+ gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+ ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
if (ret == -ENOSPC) {
zswap_reject_compress_poor++;
goto put_dstmem;
--
2.21.0 (Apple Git-120)

2019-06-05 16:35:05

by Shakeel Butt

Subject: Re: [PATCH V3 1/2] zpool: Add malloc_support_movable to zpool_driver

On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
>
> As a zpool_driver, zsmalloc can allocate movable memory because it
> supports page migration, but zbud and z3fold cannot allocate movable
> memory.
>

Cc: Vitaly

It seems like z3fold does support page migration, but z3fold's malloc
is rejecting __GFP_HIGHMEM. Vitaly, is there a reason to keep
rejecting __GFP_HIGHMEM after 1f862989b04a ("mm/z3fold.c: support page
migration")?

thanks,
Shakeel

2019-06-05 19:55:26

by Shakeel Butt

Subject: Re: [PATCH V3 2/2] zswap: Use movable memory if zpool support allocate movable memory

On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
>
> This is the third version, updated according to the comments from
> Sergey Senozhatsky https://lkml.org/lkml/2019/5/29/73 and
> Shakeel Butt https://lkml.org/lkml/2019/6/4/973
>
> [...]
>
> Signed-off-by: Hui Zhu <[email protected]>

Reviewed-by: Shakeel Butt <[email protected]>

2019-06-05 19:58:15

by Shakeel Butt

Subject: Re: [PATCH V3 1/2] zpool: Add malloc_support_movable to zpool_driver

On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
>
> As a zpool_driver, zsmalloc can allocate movable memory because it
> supports page migration, but zbud and z3fold cannot allocate movable
> memory.
>
> This commit adds malloc_support_movable to zpool_driver. A
> zpool_driver that supports allocating movable memory sets it to true.
> It also adds zpool_malloc_support_movable(), which checks
> malloc_support_movable to report whether a zpool supports allocating
> movable memory.
>
> Signed-off-by: Hui Zhu <[email protected]>

Reviewed-by: Shakeel Butt <[email protected]>

IMHO no need to block this series on the z3fold query.


2019-06-06 00:26:05

by Vitaly Wool

Subject: Re: [PATCH V3 1/2] zpool: Add malloc_support_movable to zpool_driver

Hi Shakeel,

On Wed, Jun 5, 2019 at 6:31 PM Shakeel Butt <[email protected]> wrote:
>
> On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
> >
> > As a zpool_driver, zsmalloc can allocate movable memory because it
> > supports page migration, but zbud and z3fold cannot allocate movable
> > memory.
> >
>
> Cc: Vitaly

thanks for looping me in :)

> It seems like z3fold does support page migration, but z3fold's malloc
> is rejecting __GFP_HIGHMEM. Vitaly, is there a reason to keep
> rejecting __GFP_HIGHMEM after 1f862989b04a ("mm/z3fold.c: support page
> migration")?

No; I don't think I see a reason to keep that part. You are very
welcome to submit a patch, or otherwise I can do it when I'm done with
the patches that are already in the pipeline.

Thanks,
Vitaly

2019-07-10 18:38:20

by Shakeel Butt

Subject: Re: [PATCH V3 2/2] zswap: Use movable memory if zpool support allocate movable memory

Cc: [email protected]

The email starts at

http://lkml.kernel.org/r/[email protected]

On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
>
> [...]

2019-07-10 19:34:42

by Shakeel Butt

Subject: Re: [PATCH V3 1/2] zpool: Add malloc_support_movable to zpool_driver

Cc: [email protected]

On Wed, Jun 5, 2019 at 3:06 AM Hui Zhu <[email protected]> wrote:
>
> As a zpool_driver, zsmalloc can allocate movable memory because it
> supports page migration, but zbud and z3fold cannot allocate movable
> memory.
>
> This commit adds malloc_support_movable to zpool_driver. A
> zpool_driver that supports allocating movable memory sets it to true.
> It also adds zpool_malloc_support_movable(), which checks
> malloc_support_movable to report whether a zpool supports allocating
> movable memory.
>
> Signed-off-by: Hui Zhu <[email protected]>

I was wondering why this patch has not been picked up by Andrew yet.
You forgot to CC Andrew.

Andrew, the thread starts at:

http://lkml.kernel.org/r/[email protected]
