2023-06-06 15:39:09

by Domenico Cerasuolo

[permalink] [raw]
Subject: [RFC PATCH v2 0/7] mm: zswap: move writeback LRU from zpool to zswap

This series aims to improve the zswap reclaim mechanism by reorganizing
the LRU management. In the current implementation, the LRU is maintained
within each zpool driver, resulting in duplicated code across the three
drivers. The proposed change consists of moving the LRU management from
the individual implementations up to the zswap layer.

The primary objective of this refactoring effort is to simplify the
codebase. By unifying the reclaim loop and consolidating LRU handling
within zswap, we can eliminate redundant code and improve
maintainability. Additionally, this change enables the reclamation of
stored pages in their actual LRU order. Presently, the zpool drivers
link backing pages in an LRU, causing compressed pages with different
LRU positions to be written back simultaneously.

The series consists of several patches. The first patch implements the
LRU and the reclaim loop in zswap, but it is not used yet because all
three driver implementations are marked as zpool_evictable.
The following three commits modify each zpool driver so that it is no
longer zpool_evictable, allowing the use of the reclaim loop in zswap.
As the drivers removed their shrink functions, the zpool interface is
then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink.
Finally, the code in zswap is further cleaned up by simplifying the
writeback function and removing the now unnecessary zswap_header.

Based on mm-stable + commit 399ab221f3ff
("mm: zswap: shrink until can accept") currently in mm-unstable.

V2:
- fixed lru list init/del/del_init (Johannes)
- renamed pool.lock to lru_lock and added lock ordering comment (Yosry)
- trimmed zsmalloc even more (Johannes | Nhat)
- moved ref drop out of writeback function (Johannes)

Domenico Cerasuolo (7):
mm: zswap: add pool shrinking mechanism
mm: zswap: remove page reclaim logic from zbud
mm: zswap: remove page reclaim logic from z3fold
mm: zswap: remove page reclaim logic from zsmalloc
mm: zswap: remove shrink from zpool interface
mm: zswap: simplify writeback function
mm: zswap: remove zswap_header

include/linux/zpool.h | 19 +-
mm/z3fold.c | 249 +-------------------------
mm/zbud.c | 167 +-----------------
mm/zpool.c | 48 +----
mm/zsmalloc.c | 396 ++----------------------------------------
mm/zswap.c | 186 +++++++++++---------
6 files changed, 130 insertions(+), 935 deletions(-)

--
2.34.1



2023-06-06 15:41:48

by Domenico Cerasuolo

[permalink] [raw]
Subject: [RFC PATCH v2 5/7] mm: zswap: remove shrink from zpool interface

Now that all three zswap backends have removed their shrink code, it is
no longer necessary for the zpool interface to include shrink/writeback
endpoints.

Signed-off-by: Domenico Cerasuolo <[email protected]>
---
include/linux/zpool.h | 19 ++---------------
mm/z3fold.c | 5 +----
mm/zbud.c | 5 +----
mm/zpool.c | 48 ++-----------------------------------------
mm/zsmalloc.c | 5 +----
mm/zswap.c | 27 +++++++-----------------
6 files changed, 14 insertions(+), 95 deletions(-)

diff --git a/include/linux/zpool.h b/include/linux/zpool.h
index e8997010612a..6b15a4213de5 100644
--- a/include/linux/zpool.h
+++ b/include/linux/zpool.h
@@ -14,10 +14,6 @@

struct zpool;

-struct zpool_ops {
- int (*evict)(struct zpool *pool, unsigned long handle);
-};
-
/*
* Control how a handle is mapped. It will be ignored if the
* implementation does not support it. Its use is optional.
@@ -40,7 +36,7 @@ enum zpool_mapmode {
bool zpool_has_pool(char *type);

struct zpool *zpool_create_pool(const char *type, const char *name,
- gfp_t gfp, const struct zpool_ops *ops);
+ gfp_t gfp);

const char *zpool_get_type(struct zpool *pool);

@@ -53,9 +49,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,

void zpool_free(struct zpool *pool, unsigned long handle);

-int zpool_shrink(struct zpool *pool, unsigned int pages,
- unsigned int *reclaimed);
-
void *zpool_map_handle(struct zpool *pool, unsigned long handle,
enum zpool_mapmode mm);

@@ -72,7 +65,6 @@ u64 zpool_get_total_size(struct zpool *pool);
* @destroy: destroy a pool.
* @malloc: allocate mem from a pool.
* @free: free mem from a pool.
- * @shrink: shrink the pool.
* @sleep_mapped: whether zpool driver can sleep during map.
* @map: map a handle.
* @unmap: unmap a handle.
@@ -87,10 +79,7 @@ struct zpool_driver {
atomic_t refcount;
struct list_head list;

- void *(*create)(const char *name,
- gfp_t gfp,
- const struct zpool_ops *ops,
- struct zpool *zpool);
+ void *(*create)(const char *name, gfp_t gfp);
void (*destroy)(void *pool);

bool malloc_support_movable;
@@ -98,9 +87,6 @@ struct zpool_driver {
unsigned long *handle);
void (*free)(void *pool, unsigned long handle);

- int (*shrink)(void *pool, unsigned int pages,
- unsigned int *reclaimed);
-
bool sleep_mapped;
void *(*map)(void *pool, unsigned long handle,
enum zpool_mapmode mm);
@@ -113,7 +99,6 @@ void zpool_register_driver(struct zpool_driver *driver);

int zpool_unregister_driver(struct zpool_driver *driver);

-bool zpool_evictable(struct zpool *pool);
bool zpool_can_sleep_mapped(struct zpool *pool);

#endif
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 4af8741553ac..e84de91ecccb 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = {
* zpool
****************/

-static void *z3fold_zpool_create(const char *name, gfp_t gfp,
- const struct zpool_ops *zpool_ops,
- struct zpool *zpool)
+static void *z3fold_zpool_create(const char *name, gfp_t gfp)
{
return z3fold_create_pool(name, gfp);
}
@@ -1409,7 +1407,6 @@ static struct zpool_driver z3fold_zpool_driver = {
.destroy = z3fold_zpool_destroy,
.malloc = z3fold_zpool_malloc,
.free = z3fold_zpool_free,
- .shrink = NULL,
.map = z3fold_zpool_map,
.unmap = z3fold_zpool_unmap,
.total_size = z3fold_zpool_total_size,
diff --git a/mm/zbud.c b/mm/zbud.c
index 19bc662ef5e9..2190cc1f37b3 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool)
* zpool
****************/

-static void *zbud_zpool_create(const char *name, gfp_t gfp,
- const struct zpool_ops *zpool_ops,
- struct zpool *zpool)
+static void *zbud_zpool_create(const char *name, gfp_t gfp)
{
return zbud_create_pool(gfp);
}
@@ -425,7 +423,6 @@ static struct zpool_driver zbud_zpool_driver = {
.destroy = zbud_zpool_destroy,
.malloc = zbud_zpool_malloc,
.free = zbud_zpool_free,
- .shrink = NULL,
.map = zbud_zpool_map,
.unmap = zbud_zpool_unmap,
.total_size = zbud_zpool_total_size,
diff --git a/mm/zpool.c b/mm/zpool.c
index 6a19c4a58f77..846410479c2f 100644
--- a/mm/zpool.c
+++ b/mm/zpool.c
@@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool);
* @type: The type of the zpool to create (e.g. zbud, zsmalloc)
* @name: The name of the zpool (e.g. zram0, zswap)
* @gfp: The GFP flags to use when allocating the pool.
- * @ops: The optional ops callback.
*
* This creates a new zpool of the specified type. The gfp flags will be
* used when allocating memory, if the implementation supports it. If the
@@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool);
*
* Returns: New zpool on success, NULL on failure.
*/
-struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
- const struct zpool_ops *ops)
+struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
{
struct zpool_driver *driver;
struct zpool *zpool;
@@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
}

zpool->driver = driver;
- zpool->pool = driver->create(name, gfp, ops, zpool);
+ zpool->pool = driver->create(name, gfp);

if (!zpool->pool) {
pr_err("couldn't create %s pool\n", type);
@@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
zpool->driver->free(zpool->pool, handle);
}

-/**
- * zpool_shrink() - Shrink the pool size
- * @zpool: The zpool to shrink.
- * @pages: The number of pages to shrink the pool.
- * @reclaimed: The number of pages successfully evicted.
- *
- * This attempts to shrink the actual memory size of the pool
- * by evicting currently used handle(s). If the pool was
- * created with no zpool_ops, or the evict call fails for any
- * of the handles, this will fail. If non-NULL, the @reclaimed
- * parameter will be set to the number of pages reclaimed,
- * which may be more than the number of pages requested.
- *
- * Implementations must guarantee this to be thread-safe.
- *
- * Returns: 0 on success, negative value on error/failure.
- */
-int zpool_shrink(struct zpool *zpool, unsigned int pages,
- unsigned int *reclaimed)
-{
- return zpool->driver->shrink ?
- zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
-}
-
/**
* zpool_map_handle() - Map a previously allocated handle into memory
* @zpool: The zpool that the handle was allocated from
@@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool)
return zpool->driver->total_size(zpool->pool);
}

-/**
- * zpool_evictable() - Test if zpool is potentially evictable
- * @zpool: The zpool to test
- *
- * Zpool is only potentially evictable when it's created with struct
- * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
- *
- * However, it doesn't necessarily mean driver will use zpool_ops.evict
- * in its implementation of zpool_driver.shrink. It could do internal
- * defragmentation instead.
- *
- * Returns: true if potentially evictable; false otherwise.
- */
-bool zpool_evictable(struct zpool *zpool)
-{
- return zpool->driver->shrink;
-}
-
/**
* zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
* @zpool: The zpool to test
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 75386283dba0..634daa19b6c2 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj)

#ifdef CONFIG_ZPOOL

-static void *zs_zpool_create(const char *name, gfp_t gfp,
- const struct zpool_ops *zpool_ops,
- struct zpool *zpool)
+static void *zs_zpool_create(const char *name, gfp_t gfp)
{
/*
* Ignore global gfp flags: zs_malloc() may be invoked from
@@ -420,7 +418,6 @@ static struct zpool_driver zs_zpool_driver = {
.malloc_support_movable = true,
.malloc = zs_zpool_malloc,
.free = zs_zpool_free,
- .shrink = NULL,
.map = zs_zpool_map,
.unmap = zs_zpool_unmap,
.total_size = zs_zpool_total_size,
diff --git a/mm/zswap.c b/mm/zswap.c
index c99bafcefecf..2831bf56b168 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -254,10 +254,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
static int zswap_pool_get(struct zswap_pool *pool);
static void zswap_pool_put(struct zswap_pool *pool);

-static const struct zpool_ops zswap_zpool_ops = {
- .evict = zswap_writeback_entry
-};
-
static bool zswap_is_full(void)
{
return totalram_pages() * zswap_max_pool_percent / 100 <
@@ -375,12 +371,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
if (!entry->length)
atomic_dec(&zswap_same_filled_pages);
else {
- /* zpool_evictable will be removed once all 3 backends have migrated */
- if (!zpool_evictable(entry->pool->zpool)) {
- spin_lock(&entry->pool->lru_lock);
- list_del(&entry->lru);
- spin_unlock(&entry->pool->lru_lock);
- }
+ spin_lock(&entry->pool->lru_lock);
+ list_del(&entry->lru);
+ spin_unlock(&entry->pool->lru_lock);
zpool_free(entry->pool->zpool, entry->handle);
zswap_pool_put(entry->pool);
}
@@ -659,12 +652,8 @@ static void shrink_worker(struct work_struct *w)
shrink_work);
int ret, failures = 0;

- /* zpool_evictable will be removed once all 3 backends have migrated */
do {
- if (zpool_evictable(pool->zpool))
- ret = zpool_shrink(pool->zpool, 1, NULL);
- else
- ret = zswap_shrink(pool);
+ ret = zswap_shrink(pool);
if (ret) {
zswap_reject_reclaim_fail++;
if (ret != -EAGAIN)
@@ -702,7 +691,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
/* unique name for each pool specifically required by zsmalloc */
snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));

- pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ pool->zpool = zpool_create_pool(type, name, gfp);
if (!pool->zpool) {
pr_err("%s zpool not available\n", type);
goto error;
@@ -1388,8 +1377,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
zswap_entry_put(tree, dupentry);
}
} while (ret == -EEXIST);
- /* zpool_evictable will be removed once all 3 backends have migrated */
- if (entry->length && !zpool_evictable(entry->pool->zpool)) {
+ if (entry->length) {
spin_lock(&entry->pool->lru_lock);
list_add(&entry->lru, &entry->pool->lru);
spin_unlock(&entry->pool->lru_lock);
@@ -1495,8 +1483,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
freeentry:
spin_lock(&tree->lock);
zswap_entry_put(tree, entry);
- /* zpool_evictable will be removed once all 3 backends have migrated */
- if (entry->length && !zpool_evictable(entry->pool->zpool)) {
+ if (entry->length) {
spin_lock(&entry->pool->lru_lock);
list_move(&entry->lru, &entry->pool->lru);
spin_unlock(&entry->pool->lru_lock);
--
2.34.1


2023-06-07 09:43:32

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [RFC PATCH v2 5/7] mm: zswap: remove shrink from zpool interface

On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
<[email protected]> wrote:
>
> Now that all three zswap backends have removed their shrink code, it is
> no longer necessary for the zpool interface to include shrink/writeback
> endpoints.
>
> Signed-off-by: Domenico Cerasuolo <[email protected]>

I will leave reviewing the driver-specific patches to the respective
maintainers, but this cleanup LGTM otherwise.

Reviewed-by: Yosry Ahmed <[email protected]>

> ---
> include/linux/zpool.h | 19 ++---------------
> mm/z3fold.c | 5 +----
> mm/zbud.c | 5 +----
> mm/zpool.c | 48 ++-----------------------------------------
> mm/zsmalloc.c | 5 +----
> mm/zswap.c | 27 +++++++-----------------
> 6 files changed, 14 insertions(+), 95 deletions(-)
>
> diff --git a/include/linux/zpool.h b/include/linux/zpool.h
> index e8997010612a..6b15a4213de5 100644
> --- a/include/linux/zpool.h
> +++ b/include/linux/zpool.h
> @@ -14,10 +14,6 @@
>
> struct zpool;
>
> -struct zpool_ops {
> - int (*evict)(struct zpool *pool, unsigned long handle);
> -};
> -
> /*
> * Control how a handle is mapped. It will be ignored if the
> * implementation does not support it. Its use is optional.
> @@ -40,7 +36,7 @@ enum zpool_mapmode {
> bool zpool_has_pool(char *type);
>
> struct zpool *zpool_create_pool(const char *type, const char *name,
> - gfp_t gfp, const struct zpool_ops *ops);
> + gfp_t gfp);
>
> const char *zpool_get_type(struct zpool *pool);
>
> @@ -53,9 +49,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
>
> void zpool_free(struct zpool *pool, unsigned long handle);
>
> -int zpool_shrink(struct zpool *pool, unsigned int pages,
> - unsigned int *reclaimed);
> -
> void *zpool_map_handle(struct zpool *pool, unsigned long handle,
> enum zpool_mapmode mm);
>
> @@ -72,7 +65,6 @@ u64 zpool_get_total_size(struct zpool *pool);
> * @destroy: destroy a pool.
> * @malloc: allocate mem from a pool.
> * @free: free mem from a pool.
> - * @shrink: shrink the pool.
> * @sleep_mapped: whether zpool driver can sleep during map.
> * @map: map a handle.
> * @unmap: unmap a handle.
> @@ -87,10 +79,7 @@ struct zpool_driver {
> atomic_t refcount;
> struct list_head list;
>
> - void *(*create)(const char *name,
> - gfp_t gfp,
> - const struct zpool_ops *ops,
> - struct zpool *zpool);
> + void *(*create)(const char *name, gfp_t gfp);
> void (*destroy)(void *pool);
>
> bool malloc_support_movable;
> @@ -98,9 +87,6 @@ struct zpool_driver {
> unsigned long *handle);
> void (*free)(void *pool, unsigned long handle);
>
> - int (*shrink)(void *pool, unsigned int pages,
> - unsigned int *reclaimed);
> -
> bool sleep_mapped;
> void *(*map)(void *pool, unsigned long handle,
> enum zpool_mapmode mm);
> @@ -113,7 +99,6 @@ void zpool_register_driver(struct zpool_driver *driver);
>
> int zpool_unregister_driver(struct zpool_driver *driver);
>
> -bool zpool_evictable(struct zpool *pool);
> bool zpool_can_sleep_mapped(struct zpool *pool);
>
> #endif
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 4af8741553ac..e84de91ecccb 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = {
> * zpool
> ****************/
>
> -static void *z3fold_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *z3fold_zpool_create(const char *name, gfp_t gfp)
> {
> return z3fold_create_pool(name, gfp);
> }
> @@ -1409,7 +1407,6 @@ static struct zpool_driver z3fold_zpool_driver = {
> .destroy = z3fold_zpool_destroy,
> .malloc = z3fold_zpool_malloc,
> .free = z3fold_zpool_free,
> - .shrink = NULL,
> .map = z3fold_zpool_map,
> .unmap = z3fold_zpool_unmap,
> .total_size = z3fold_zpool_total_size,
> diff --git a/mm/zbud.c b/mm/zbud.c
> index 19bc662ef5e9..2190cc1f37b3 100644
> --- a/mm/zbud.c
> +++ b/mm/zbud.c
> @@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool)
> * zpool
> ****************/
>
> -static void *zbud_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *zbud_zpool_create(const char *name, gfp_t gfp)
> {
> return zbud_create_pool(gfp);
> }
> @@ -425,7 +423,6 @@ static struct zpool_driver zbud_zpool_driver = {
> .destroy = zbud_zpool_destroy,
> .malloc = zbud_zpool_malloc,
> .free = zbud_zpool_free,
> - .shrink = NULL,
> .map = zbud_zpool_map,
> .unmap = zbud_zpool_unmap,
> .total_size = zbud_zpool_total_size,
> diff --git a/mm/zpool.c b/mm/zpool.c
> index 6a19c4a58f77..846410479c2f 100644
> --- a/mm/zpool.c
> +++ b/mm/zpool.c
> @@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool);
> * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
> * @name: The name of the zpool (e.g. zram0, zswap)
> * @gfp: The GFP flags to use when allocating the pool.
> - * @ops: The optional ops callback.
> *
> * This creates a new zpool of the specified type. The gfp flags will be
> * used when allocating memory, if the implementation supports it. If the
> @@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool);
> *
> * Returns: New zpool on success, NULL on failure.
> */
> -struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
> - const struct zpool_ops *ops)
> +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
> {
> struct zpool_driver *driver;
> struct zpool *zpool;
> @@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
> }
>
> zpool->driver = driver;
> - zpool->pool = driver->create(name, gfp, ops, zpool);
> + zpool->pool = driver->create(name, gfp);
>
> if (!zpool->pool) {
> pr_err("couldn't create %s pool\n", type);
> @@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
> zpool->driver->free(zpool->pool, handle);
> }
>
> -/**
> - * zpool_shrink() - Shrink the pool size
> - * @zpool: The zpool to shrink.
> - * @pages: The number of pages to shrink the pool.
> - * @reclaimed: The number of pages successfully evicted.
> - *
> - * This attempts to shrink the actual memory size of the pool
> - * by evicting currently used handle(s). If the pool was
> - * created with no zpool_ops, or the evict call fails for any
> - * of the handles, this will fail. If non-NULL, the @reclaimed
> - * parameter will be set to the number of pages reclaimed,
> - * which may be more than the number of pages requested.
> - *
> - * Implementations must guarantee this to be thread-safe.
> - *
> - * Returns: 0 on success, negative value on error/failure.
> - */
> -int zpool_shrink(struct zpool *zpool, unsigned int pages,
> - unsigned int *reclaimed)
> -{
> - return zpool->driver->shrink ?
> - zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
> -}
> -
> /**
> * zpool_map_handle() - Map a previously allocated handle into memory
> * @zpool: The zpool that the handle was allocated from
> @@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool)
> return zpool->driver->total_size(zpool->pool);
> }
>
> -/**
> - * zpool_evictable() - Test if zpool is potentially evictable
> - * @zpool: The zpool to test
> - *
> - * Zpool is only potentially evictable when it's created with struct
> - * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
> - *
> - * However, it doesn't necessarily mean driver will use zpool_ops.evict
> - * in its implementation of zpool_driver.shrink. It could do internal
> - * defragmentation instead.
> - *
> - * Returns: true if potentially evictable; false otherwise.
> - */
> -bool zpool_evictable(struct zpool *zpool)
> -{
> - return zpool->driver->shrink;
> -}
> -
> /**
> * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
> * @zpool: The zpool to test
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 75386283dba0..634daa19b6c2 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj)
>
> #ifdef CONFIG_ZPOOL
>
> -static void *zs_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *zs_zpool_create(const char *name, gfp_t gfp)
> {
> /*
> * Ignore global gfp flags: zs_malloc() may be invoked from
> @@ -420,7 +418,6 @@ static struct zpool_driver zs_zpool_driver = {
> .malloc_support_movable = true,
> .malloc = zs_zpool_malloc,
> .free = zs_zpool_free,
> - .shrink = NULL,
> .map = zs_zpool_map,
> .unmap = zs_zpool_unmap,
> .total_size = zs_zpool_total_size,
> diff --git a/mm/zswap.c b/mm/zswap.c
> index c99bafcefecf..2831bf56b168 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -254,10 +254,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
> static int zswap_pool_get(struct zswap_pool *pool);
> static void zswap_pool_put(struct zswap_pool *pool);
>
> -static const struct zpool_ops zswap_zpool_ops = {
> - .evict = zswap_writeback_entry
> -};
> -
> static bool zswap_is_full(void)
> {
> return totalram_pages() * zswap_max_pool_percent / 100 <
> @@ -375,12 +371,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
> if (!entry->length)
> atomic_dec(&zswap_same_filled_pages);
> else {
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (!zpool_evictable(entry->pool->zpool)) {
> - spin_lock(&entry->pool->lru_lock);
> - list_del(&entry->lru);
> - spin_unlock(&entry->pool->lru_lock);
> - }
> + spin_lock(&entry->pool->lru_lock);
> + list_del(&entry->lru);
> + spin_unlock(&entry->pool->lru_lock);
> zpool_free(entry->pool->zpool, entry->handle);
> zswap_pool_put(entry->pool);
> }
> @@ -659,12 +652,8 @@ static void shrink_worker(struct work_struct *w)
> shrink_work);
> int ret, failures = 0;
>
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> do {
> - if (zpool_evictable(pool->zpool))
> - ret = zpool_shrink(pool->zpool, 1, NULL);
> - else
> - ret = zswap_shrink(pool);
> + ret = zswap_shrink(pool);
> if (ret) {
> zswap_reject_reclaim_fail++;
> if (ret != -EAGAIN)
> @@ -702,7 +691,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> /* unique name for each pool specifically required by zsmalloc */
> snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
>
> - pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
> + pool->zpool = zpool_create_pool(type, name, gfp);
> if (!pool->zpool) {
> pr_err("%s zpool not available\n", type);
> goto error;
> @@ -1388,8 +1377,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
> zswap_entry_put(tree, dupentry);
> }
> } while (ret == -EEXIST);
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> + if (entry->length) {
> spin_lock(&entry->pool->lru_lock);
> list_add(&entry->lru, &entry->pool->lru);
> spin_unlock(&entry->pool->lru_lock);
> @@ -1495,8 +1483,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
> freeentry:
> spin_lock(&tree->lock);
> zswap_entry_put(tree, entry);
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> + if (entry->length) {
> spin_lock(&entry->pool->lru_lock);
> list_move(&entry->lru, &entry->pool->lru);
> spin_unlock(&entry->pool->lru_lock);
> --
> 2.34.1
>

2023-06-07 09:47:01

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [RFC PATCH v2 0/7] mm: zswap: move writeback LRU from zpool to zswap

On Wed, Jun 7, 2023 at 2:24 AM Domenico Cerasuolo
<[email protected]> wrote:
>
> On Wed, Jun 7, 2023 at 11:16 AM Yosry Ahmed <[email protected]> wrote:
> >
> > On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
> > <[email protected]> wrote:
> > >
> > > This series aims to improve the zswap reclaim mechanism by reorganizing
> > > the LRU management. In the current implementation, the LRU is maintained
> > > within each zpool driver, resulting in duplicated code across the three
> > > drivers. The proposed change consists in moving the LRU management from
> > > the individual implementations up to the zswap layer.
> > >
> > > The primary objective of this refactoring effort is to simplify the
> > > codebase. By unifying the reclaim loop and consolidating LRU handling
> > > within zswap, we can eliminate redundant code and improve
> > > maintainability. Additionally, this change enables the reclamation of
> > > stored pages in their actual LRU order. Presently, the zpool drivers
> > > link backing pages in an LRU, causing compressed pages with different
> > > LRU positions to be written back simultaneously.
> > >
> > > The series consists of several patches. The first patch implements the
> > > LRU and the reclaim loop in zswap, but it is not used yet because all
> > > three driver implementations are marked as zpool_evictable.
> > > The following three commits modify each zpool driver to be not
> > > zpool_evictable, allowing the use of the reclaim loop in zswap.
> > > As the drivers removed their shrink functions, the zpool interface is
> > > then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink.
> > > Finally, the code in zswap is further cleaned up by simplifying the
> > > writeback function and removing the now unnecessary zswap_header.
> > >
> > > Based on mm-stable + commit 399ab221f3ff
> > > ("mm: zswap: shrink until can accept") currently in mm-unstable.
> >
> > I tested this + commit fe1d1f7d0fb5 ("mm: zswap: support exclusive
> > loads") currently in mm-unstable, using zsmalloc and
> > CONFIG_ZSWAP_EXCLUSIVE_LOADS=y. I only ran basic zswap tests with
> > manual writeback induction and made sure everything is sane. I
> > obviously hope you did more involved testing :)
> >
> > The only problem I came across is the conflict with fe1d1f7d0fb5, and
> > I suggested the fix in patch 1. With the fix, everything seems
> > correct.
> >
> > So I guess, FWIW for all the patches except 2 & 3 (for zbud and z3fold):
> > Tested-by: Yosry Ahmed <[email protected]>
>
> Thanks a lot for the effort! I'll rebase and test it again before submitting the
> new version.

Perhaps give v2 a little bit more time to give other folks a chance to
take a look as well, save yourself (and probably Andrew) the trouble
of sending a new version for every single review :)

>
> >
> > >
> > > V2:
> > > - fixed lru list init/del/del_init (Johannes)
> > > - renamed pool.lock to lru_lock and added lock ordering comment (Yosry)
> > > - trimmed zsmalloc even more (Johannes | Nhat)
> > > - moved ref drop out of writeback function (Johannes)
> > >
> > > Domenico Cerasuolo (7):
> > > mm: zswap: add pool shrinking mechanism
> > > mm: zswap: remove page reclaim logic from zbud
> > > mm: zswap: remove page reclaim logic from z3fold
> > > mm: zswap: remove page reclaim logic from zsmalloc
> > > mm: zswap: remove shrink from zpool interface
> > > mm: zswap: simplify writeback function
> > > mm: zswap: remove zswap_header
> > >
> > > include/linux/zpool.h | 19 +-
> > > mm/z3fold.c | 249 +-------------------------
> > > mm/zbud.c | 167 +-----------------
> > > mm/zpool.c | 48 +----
> > > mm/zsmalloc.c | 396 ++----------------------------------------
> > > mm/zswap.c | 186 +++++++++++---------
> > > 6 files changed, 130 insertions(+), 935 deletions(-)
> > >
> > > --
> > > 2.34.1
> > >

2023-06-07 09:48:14

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [RFC PATCH v2 0/7] mm: zswap: move writeback LRU from zpool to zswap

On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
<[email protected]> wrote:
>
> This series aims to improve the zswap reclaim mechanism by reorganizing
> the LRU management. In the current implementation, the LRU is maintained
> within each zpool driver, resulting in duplicated code across the three
> drivers. The proposed change consists in moving the LRU management from
> the individual implementations up to the zswap layer.
>
> The primary objective of this refactoring effort is to simplify the
> codebase. By unifying the reclaim loop and consolidating LRU handling
> within zswap, we can eliminate redundant code and improve
> maintainability. Additionally, this change enables the reclamation of
> stored pages in their actual LRU order. Presently, the zpool drivers
> link backing pages in an LRU, causing compressed pages with different
> LRU positions to be written back simultaneously.
>
> The series consists of several patches. The first patch implements the
> LRU and the reclaim loop in zswap, but it is not used yet because all
> three driver implementations are marked as zpool_evictable.
> The following three commits modify each zpool driver to be not
> zpool_evictable, allowing the use of the reclaim loop in zswap.
> As the drivers removed their shrink functions, the zpool interface is
> then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink.
> Finally, the code in zswap is further cleaned up by simplifying the
> writeback function and removing the now unnecessary zswap_header.
>
> Based on mm-stable + commit 399ab221f3ff
> ("mm: zswap: shrink until can accept") currently in mm-unstable.

I tested this + commit fe1d1f7d0fb5 ("mm: zswap: support exclusive
loads") currently in mm-unstable, using zsmalloc and
CONFIG_ZSWAP_EXCLUSIVE_LOADS=y. I only ran basic zswap tests with
manual writeback induction and made sure everything is sane. I
obviously hope you did more involved testing :)

The only problem I came across is the conflict with fe1d1f7d0fb5, and
I suggested the fix in patch 1. With the fix, everything seems
correct.

So I guess, FWIW for all the patches except 2 & 3 (for zbud and z3fold):
Tested-by: Yosry Ahmed <[email protected]>

>
> V2:
> - fixed lru list init/del/del_init (Johannes)
> - renamed pool.lock to lru_lock and added lock ordering comment (Yosry)
> - trimmed zsmalloc even more (Johannes | Nhat)
> - moved ref drop out of writeback function (Johannes)
>
> Domenico Cerasuolo (7):
> mm: zswap: add pool shrinking mechanism
> mm: zswap: remove page reclaim logic from zbud
> mm: zswap: remove page reclaim logic from z3fold
> mm: zswap: remove page reclaim logic from zsmalloc
> mm: zswap: remove shrink from zpool interface
> mm: zswap: simplify writeback function
> mm: zswap: remove zswap_header
>
> include/linux/zpool.h | 19 +-
> mm/z3fold.c | 249 +-------------------------
> mm/zbud.c | 167 +-----------------
> mm/zpool.c | 48 +----
> mm/zsmalloc.c | 396 ++----------------------------------------
> mm/zswap.c | 186 +++++++++++---------
> 6 files changed, 130 insertions(+), 935 deletions(-)
>
> --
> 2.34.1
>

2023-06-07 10:00:05

by Domenico Cerasuolo

[permalink] [raw]
Subject: Re: [RFC PATCH v2 0/7] mm: zswap: move writeback LRU from zpool to zswap

On Wed, Jun 7, 2023 at 11:16 AM Yosry Ahmed <[email protected]> wrote:
>
> On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
> <[email protected]> wrote:
> >
> > This series aims to improve the zswap reclaim mechanism by reorganizing
> > the LRU management. In the current implementation, the LRU is maintained
> > within each zpool driver, resulting in duplicated code across the three
> > drivers. The proposed change consists in moving the LRU management from
> > the individual implementations up to the zswap layer.
> >
> > The primary objective of this refactoring effort is to simplify the
> > codebase. By unifying the reclaim loop and consolidating LRU handling
> > within zswap, we can eliminate redundant code and improve
> > maintainability. Additionally, this change enables the reclamation of
> > stored pages in their actual LRU order. Presently, the zpool drivers
> > link backing pages in an LRU, causing compressed pages with different
> > LRU positions to be written back simultaneously.
> >
> > The series consists of several patches. The first patch implements the
> > LRU and the reclaim loop in zswap, but it is not used yet because all
> > three driver implementations are marked as zpool_evictable.
> > The following three commits modify each zpool driver to be not
> > zpool_evictable, allowing the use of the reclaim loop in zswap.
> > As the drivers removed their shrink functions, the zpool interface is
> > then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink.
> > Finally, the code in zswap is further cleaned up by simplifying the
> > writeback function and removing the now unnecessary zswap_header.
> >
> > Based on mm-stable + commit 399ab221f3ff
> > ("mm: zswap: shrink until can accept") currently in mm-unstable.
>
> I tested this + commit fe1d1f7d0fb5 ("mm: zswap: support exclusive
> loads") currently in mm-unstable, using zsmalloc and
> CONFIG_ZSWAP_EXCLUSIVE_LOADS=y. I only ran basic zswap tests with
> manual writeback induction and made sure everything is sane. I
> obviously hope you did more involved testing :)
>
> The only problem I came across is the conflict with fe1d1f7d0fb5, and
> I suggested the fix in patch 1. With the fix, everything seems
> correct.
>
> So I guess, FWIW for all the patches except 2 & 3 (for zbud and z3fold):
> Tested-by: Yosry Ahmed <[email protected]>

Thanks a lot for the effort! I'll rebase and test it again before submitting the
new version.

>
> >
> > V2:
> > - fixed lru list init/del/del_init (Johannes)
> > - renamed pool.lock to lru_lock and added lock ordering comment (Yosry)
> > - trimmed zsmalloc even more (Johannes | Nhat)
> > - moved ref drop out of writeback function (Johannes)
> >
> > Domenico Cerasuolo (7):
> > mm: zswap: add pool shrinking mechanism
> > mm: zswap: remove page reclaim logic from zbud
> > mm: zswap: remove page reclaim logic from z3fold
> > mm: zswap: remove page reclaim logic from zsmalloc
> > mm: zswap: remove shrink from zpool interface
> > mm: zswap: simplify writeback function
> > mm: zswap: remove zswap_header
> >
> > include/linux/zpool.h | 19 +-
> > mm/z3fold.c | 249 +-------------------------
> > mm/zbud.c | 167 +-----------------
> > mm/zpool.c | 48 +----
> > mm/zsmalloc.c | 396 ++----------------------------------------
> > mm/zswap.c | 186 +++++++++++---------
> > 6 files changed, 130 insertions(+), 935 deletions(-)
> >
> > --
> > 2.34.1
> >

2023-06-08 16:36:26

by Johannes Weiner

[permalink] [raw]
Subject: Re: [RFC PATCH v2 5/7] mm: zswap: remove shrink from zpool interface

On Tue, Jun 06, 2023 at 04:56:09PM +0200, Domenico Cerasuolo wrote:
> @@ -40,7 +36,7 @@ enum zpool_mapmode {
> bool zpool_has_pool(char *type);
>
> struct zpool *zpool_create_pool(const char *type, const char *name,
> - gfp_t gfp, const struct zpool_ops *ops);
> + gfp_t gfp);

This fits into a single line now.

Otherwise, the patch looks great to me.

Acked-by: Johannes Weiner <[email protected]>

2023-06-08 18:15:58

by Nhat Pham

[permalink] [raw]
Subject: Re: [RFC PATCH v2 5/7] mm: zswap: remove shrink from zpool interface

On Tue, Jun 6, 2023 at 7:56 AM Domenico Cerasuolo
<[email protected]> wrote:
>
> Now that all three zswap backends have removed their shrink code, it is
> no longer necessary for the zpool interface to include shrink/writeback
> endpoints.
>
> Signed-off-by: Domenico Cerasuolo <[email protected]>
> ---
> include/linux/zpool.h | 19 ++---------------
> mm/z3fold.c | 5 +----
> mm/zbud.c | 5 +----
> mm/zpool.c | 48 ++-----------------------------------------
> mm/zsmalloc.c | 5 +----
> mm/zswap.c | 27 +++++++-----------------
> 6 files changed, 14 insertions(+), 95 deletions(-)
>
> diff --git a/include/linux/zpool.h b/include/linux/zpool.h
> index e8997010612a..6b15a4213de5 100644
> --- a/include/linux/zpool.h
> +++ b/include/linux/zpool.h
> @@ -14,10 +14,6 @@
>
> struct zpool;
>
> -struct zpool_ops {
> - int (*evict)(struct zpool *pool, unsigned long handle);
> -};
> -
> /*
> * Control how a handle is mapped. It will be ignored if the
> * implementation does not support it. Its use is optional.
> @@ -40,7 +36,7 @@ enum zpool_mapmode {
> bool zpool_has_pool(char *type);
>
> struct zpool *zpool_create_pool(const char *type, const char *name,
> - gfp_t gfp, const struct zpool_ops *ops);
> + gfp_t gfp);
>
> const char *zpool_get_type(struct zpool *pool);
>
> @@ -53,9 +49,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp,
>
> void zpool_free(struct zpool *pool, unsigned long handle);
>
> -int zpool_shrink(struct zpool *pool, unsigned int pages,
> - unsigned int *reclaimed);
> -
> void *zpool_map_handle(struct zpool *pool, unsigned long handle,
> enum zpool_mapmode mm);
>
> @@ -72,7 +65,6 @@ u64 zpool_get_total_size(struct zpool *pool);
> * @destroy: destroy a pool.
> * @malloc: allocate mem from a pool.
> * @free: free mem from a pool.
> - * @shrink: shrink the pool.
> * @sleep_mapped: whether zpool driver can sleep during map.
> * @map: map a handle.
> * @unmap: unmap a handle.
> @@ -87,10 +79,7 @@ struct zpool_driver {
> atomic_t refcount;
> struct list_head list;
>
> - void *(*create)(const char *name,
> - gfp_t gfp,
> - const struct zpool_ops *ops,
> - struct zpool *zpool);
> + void *(*create)(const char *name, gfp_t gfp);
> void (*destroy)(void *pool);
>
> bool malloc_support_movable;
> @@ -98,9 +87,6 @@ struct zpool_driver {
> unsigned long *handle);
> void (*free)(void *pool, unsigned long handle);
>
> - int (*shrink)(void *pool, unsigned int pages,
> - unsigned int *reclaimed);
> -
> bool sleep_mapped;
> void *(*map)(void *pool, unsigned long handle,
> enum zpool_mapmode mm);
> @@ -113,7 +99,6 @@ void zpool_register_driver(struct zpool_driver *driver);
>
> int zpool_unregister_driver(struct zpool_driver *driver);
>
> -bool zpool_evictable(struct zpool *pool);
> bool zpool_can_sleep_mapped(struct zpool *pool);
>
> #endif
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 4af8741553ac..e84de91ecccb 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = {
> * zpool
> ****************/
>
> -static void *z3fold_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *z3fold_zpool_create(const char *name, gfp_t gfp)
> {
> return z3fold_create_pool(name, gfp);
> }
> @@ -1409,7 +1407,6 @@ static struct zpool_driver z3fold_zpool_driver = {
> .destroy = z3fold_zpool_destroy,
> .malloc = z3fold_zpool_malloc,
> .free = z3fold_zpool_free,
> - .shrink = NULL,
> .map = z3fold_zpool_map,
> .unmap = z3fold_zpool_unmap,
> .total_size = z3fold_zpool_total_size,
> diff --git a/mm/zbud.c b/mm/zbud.c
> index 19bc662ef5e9..2190cc1f37b3 100644
> --- a/mm/zbud.c
> +++ b/mm/zbud.c
> @@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool)
> * zpool
> ****************/
>
> -static void *zbud_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *zbud_zpool_create(const char *name, gfp_t gfp)
> {
> return zbud_create_pool(gfp);
> }
> @@ -425,7 +423,6 @@ static struct zpool_driver zbud_zpool_driver = {
> .destroy = zbud_zpool_destroy,
> .malloc = zbud_zpool_malloc,
> .free = zbud_zpool_free,
> - .shrink = NULL,
> .map = zbud_zpool_map,
> .unmap = zbud_zpool_unmap,
> .total_size = zbud_zpool_total_size,
> diff --git a/mm/zpool.c b/mm/zpool.c
> index 6a19c4a58f77..846410479c2f 100644
> --- a/mm/zpool.c
> +++ b/mm/zpool.c
> @@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool);
> * @type: The type of the zpool to create (e.g. zbud, zsmalloc)
> * @name: The name of the zpool (e.g. zram0, zswap)
> * @gfp: The GFP flags to use when allocating the pool.
> - * @ops: The optional ops callback.
> *
> * This creates a new zpool of the specified type. The gfp flags will be
> * used when allocating memory, if the implementation supports it. If the
> @@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool);
> *
> * Returns: New zpool on success, NULL on failure.
> */
> -struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
> - const struct zpool_ops *ops)
> +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp)
> {
> struct zpool_driver *driver;
> struct zpool *zpool;
> @@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp,
> }
>
> zpool->driver = driver;
> - zpool->pool = driver->create(name, gfp, ops, zpool);
> + zpool->pool = driver->create(name, gfp);
>
> if (!zpool->pool) {
> pr_err("couldn't create %s pool\n", type);
> @@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle)
> zpool->driver->free(zpool->pool, handle);
> }
>
> -/**
> - * zpool_shrink() - Shrink the pool size
> - * @zpool: The zpool to shrink.
> - * @pages: The number of pages to shrink the pool.
> - * @reclaimed: The number of pages successfully evicted.
> - *
> - * This attempts to shrink the actual memory size of the pool
> - * by evicting currently used handle(s). If the pool was
> - * created with no zpool_ops, or the evict call fails for any
> - * of the handles, this will fail. If non-NULL, the @reclaimed
> - * parameter will be set to the number of pages reclaimed,
> - * which may be more than the number of pages requested.
> - *
> - * Implementations must guarantee this to be thread-safe.
> - *
> - * Returns: 0 on success, negative value on error/failure.
> - */
> -int zpool_shrink(struct zpool *zpool, unsigned int pages,
> - unsigned int *reclaimed)
> -{
> - return zpool->driver->shrink ?
> - zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL;
> -}
> -
> /**
> * zpool_map_handle() - Map a previously allocated handle into memory
> * @zpool: The zpool that the handle was allocated from
> @@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool)
> return zpool->driver->total_size(zpool->pool);
> }
>
> -/**
> - * zpool_evictable() - Test if zpool is potentially evictable
> - * @zpool: The zpool to test
> - *
> - * Zpool is only potentially evictable when it's created with struct
> - * zpool_ops.evict and its driver implements struct zpool_driver.shrink.
> - *
> - * However, it doesn't necessarily mean driver will use zpool_ops.evict
> - * in its implementation of zpool_driver.shrink. It could do internal
> - * defragmentation instead.
> - *
> - * Returns: true if potentially evictable; false otherwise.
> - */
> -bool zpool_evictable(struct zpool *zpool)
> -{
> - return zpool->driver->shrink;
> -}
> -
> /**
> * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped.
> * @zpool: The zpool to test
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 75386283dba0..634daa19b6c2 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj)
>
> #ifdef CONFIG_ZPOOL
>
> -static void *zs_zpool_create(const char *name, gfp_t gfp,
> - const struct zpool_ops *zpool_ops,
> - struct zpool *zpool)
> +static void *zs_zpool_create(const char *name, gfp_t gfp)
> {
> /*
> * Ignore global gfp flags: zs_malloc() may be invoked from
> @@ -420,7 +418,6 @@ static struct zpool_driver zs_zpool_driver = {
> .malloc_support_movable = true,
> .malloc = zs_zpool_malloc,
> .free = zs_zpool_free,
> - .shrink = NULL,
> .map = zs_zpool_map,
> .unmap = zs_zpool_unmap,
> .total_size = zs_zpool_total_size,
> diff --git a/mm/zswap.c b/mm/zswap.c
> index c99bafcefecf..2831bf56b168 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -254,10 +254,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
> static int zswap_pool_get(struct zswap_pool *pool);
> static void zswap_pool_put(struct zswap_pool *pool);
>
> -static const struct zpool_ops zswap_zpool_ops = {
> - .evict = zswap_writeback_entry
> -};
> -
> static bool zswap_is_full(void)
> {
> return totalram_pages() * zswap_max_pool_percent / 100 <
> @@ -375,12 +371,9 @@ static void zswap_free_entry(struct zswap_entry *entry)
> if (!entry->length)
> atomic_dec(&zswap_same_filled_pages);
> else {
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (!zpool_evictable(entry->pool->zpool)) {
> - spin_lock(&entry->pool->lru_lock);
> - list_del(&entry->lru);
> - spin_unlock(&entry->pool->lru_lock);
> - }
> + spin_lock(&entry->pool->lru_lock);
> + list_del(&entry->lru);
> + spin_unlock(&entry->pool->lru_lock);
> zpool_free(entry->pool->zpool, entry->handle);
> zswap_pool_put(entry->pool);
> }
> @@ -659,12 +652,8 @@ static void shrink_worker(struct work_struct *w)
> shrink_work);
> int ret, failures = 0;
>
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> do {
> - if (zpool_evictable(pool->zpool))
> - ret = zpool_shrink(pool->zpool, 1, NULL);
> - else
> - ret = zswap_shrink(pool);
> + ret = zswap_shrink(pool);
> if (ret) {
> zswap_reject_reclaim_fail++;
> if (ret != -EAGAIN)
> @@ -702,7 +691,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
> /* unique name for each pool specifically required by zsmalloc */
> snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
>
> - pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
> + pool->zpool = zpool_create_pool(type, name, gfp);
> if (!pool->zpool) {
> pr_err("%s zpool not available\n", type);
> goto error;
> @@ -1388,8 +1377,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
> zswap_entry_put(tree, dupentry);
> }
> } while (ret == -EEXIST);
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> + if (entry->length) {
> spin_lock(&entry->pool->lru_lock);
> list_add(&entry->lru, &entry->pool->lru);
> spin_unlock(&entry->pool->lru_lock);
> @@ -1495,8 +1483,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
> freeentry:
> spin_lock(&tree->lock);
> zswap_entry_put(tree, entry);
> - /* zpool_evictable will be removed once all 3 backends have migrated */
> - if (entry->length && !zpool_evictable(entry->pool->zpool)) {
> + if (entry->length) {
> spin_lock(&entry->pool->lru_lock);
> list_move(&entry->lru, &entry->pool->lru);
> spin_unlock(&entry->pool->lru_lock);
> --
> 2.34.1
>
zsmalloc's shrink removal looks good to me.
Acked-by: Nhat Pham <[email protected]>