2024-06-12 12:49:01

by Usama Arif

Subject: [PATCH v4 2/2] mm: remove code to handle same filled pages

With an earlier commit to handle zero-filled pages in swap directly,
and with only 1% of the same-filled pages being non-zero, zswap no
longer needs to handle same-filled pages and can just work on compressed
pages.

Signed-off-by: Usama Arif <[email protected]>
---
mm/zswap.c | 86 +++++-------------------------------------------------
1 file changed, 8 insertions(+), 78 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index b9b35ef86d9b..ca8df9c99abf 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -44,8 +44,6 @@
**********************************/
/* The number of compressed pages currently stored in zswap */
atomic_t zswap_stored_pages = ATOMIC_INIT(0);
-/* The number of same-value filled pages currently stored in zswap */
-static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

/*
* The statistics below are not protected from concurrent access for
@@ -182,11 +180,9 @@ static struct shrinker *zswap_shrinker;
 *
 * swpentry - associated swap entry, the offset indexes into the red-black tree
 * length - the length in bytes of the compressed page data. Needed during
- *          decompression. For a same value filled page length is 0, and both
- *          pool and lru are invalid and must be ignored.
+ *          decompression.
 * pool - the zswap_pool the entry's data is in
 * handle - zpool allocation handle that stores the compressed page data
- * value - value of the same-value filled pages which have same content
 * objcg - the obj_cgroup that the compressed memory is charged to
 * lru - handle to the pool's lru used to evict pages.
 */
@@ -194,10 +190,7 @@ struct zswap_entry {
	swp_entry_t swpentry;
	unsigned int length;
	struct zswap_pool *pool;
-	union {
-		unsigned long handle;
-		unsigned long value;
-	};
+	unsigned long handle;
	struct obj_cgroup *objcg;
	struct list_head lru;
};
@@ -814,13 +807,9 @@ static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
 */
static void zswap_entry_free(struct zswap_entry *entry)
{
-	if (!entry->length)
-		atomic_dec(&zswap_same_filled_pages);
-	else {
-		zswap_lru_del(&zswap_list_lru, entry);
-		zpool_free(zswap_find_zpool(entry), entry->handle);
-		zswap_pool_put(entry->pool);
-	}
+	zswap_lru_del(&zswap_list_lru, entry);
+	zpool_free(zswap_find_zpool(entry), entry->handle);
+	zswap_pool_put(entry->pool);
	if (entry->objcg) {
		obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
		obj_cgroup_put(entry->objcg);
@@ -1262,11 +1251,6 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
	 * This ensures that the better zswap compresses memory, the fewer
	 * pages we will evict to swap (as it will otherwise incur IO for
	 * relatively small memory saving).
-	 *
-	 * The memory saving factor calculated here takes same-filled pages into
-	 * account, but those are not freeable since they almost occupy no
-	 * space. Hence, we may scale nr_freeable down a little bit more than we
-	 * should if we have a lot of same-filled pages.
	 */
	return mult_frac(nr_freeable, nr_backing, nr_stored);
}
@@ -1370,42 +1354,6 @@ static void shrink_worker(struct work_struct *w)
	} while (zswap_total_pages() > thr);
}

-/*********************************
-* same-filled functions
-**********************************/
-static bool zswap_is_folio_same_filled(struct folio *folio, unsigned long *value)
-{
-	unsigned long *data;
-	unsigned long val;
-	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
-	bool ret = false;
-
-	data = kmap_local_folio(folio, 0);
-	val = data[0];
-
-	if (val != data[last_pos])
-		goto out;
-
-	for (pos = 1; pos < last_pos; pos++) {
-		if (val != data[pos])
-			goto out;
-	}
-
-	*value = val;
-	ret = true;
-out:
-	kunmap_local(data);
-	return ret;
-}
-
-static void zswap_fill_folio(struct folio *folio, unsigned long value)
-{
-	unsigned long *data = kmap_local_folio(folio, 0);
-
-	memset_l(data, value, PAGE_SIZE / sizeof(unsigned long));
-	kunmap_local(data);
-}
-
/*********************************
* main API
**********************************/
@@ -1417,7 +1365,6 @@ bool zswap_store(struct folio *folio)
	struct zswap_entry *entry, *old;
	struct obj_cgroup *objcg = NULL;
	struct mem_cgroup *memcg = NULL;
-	unsigned long value;

	VM_WARN_ON_ONCE(!folio_test_locked(folio));
	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1450,13 +1397,6 @@ bool zswap_store(struct folio *folio)
		goto reject;
	}

-	if (zswap_is_folio_same_filled(folio, &value)) {
-		entry->length = 0;
-		entry->value = value;
-		atomic_inc(&zswap_same_filled_pages);
-		goto store_entry;
-	}
-
	/* if entry is successfully added, it keeps the reference */
	entry->pool = zswap_pool_current_get();
	if (!entry->pool)
@@ -1474,7 +1414,6 @@ bool zswap_store(struct folio *folio)
	if (!zswap_compress(folio, entry))
		goto put_pool;

-store_entry:
	entry->swpentry = swp;
	entry->objcg = objcg;

@@ -1522,13 +1461,9 @@ bool zswap_store(struct folio *folio)
	return true;

store_failed:
-	if (!entry->length)
-		atomic_dec(&zswap_same_filled_pages);
-	else {
-		zpool_free(zswap_find_zpool(entry), entry->handle);
+	zpool_free(zswap_find_zpool(entry), entry->handle);
put_pool:
-		zswap_pool_put(entry->pool);
-	}
+	zswap_pool_put(entry->pool);
freepage:
	zswap_entry_cache_free(entry);
reject:
@@ -1577,10 +1512,7 @@ bool zswap_load(struct folio *folio)
	if (!entry)
		return false;

-	if (entry->length)
-		zswap_decompress(entry, folio);
-	else
-		zswap_fill_folio(folio, entry->value);
+	zswap_decompress(entry, folio);

	count_vm_event(ZSWPIN);
	if (entry->objcg)
@@ -1682,8 +1614,6 @@ static int zswap_debugfs_init(void)
			    zswap_debugfs_root, NULL, &total_size_fops);
	debugfs_create_atomic_t("stored_pages", 0444,
				zswap_debugfs_root, &zswap_stored_pages);
-	debugfs_create_atomic_t("same_filled_pages", 0444,
-				zswap_debugfs_root, &zswap_same_filled_pages);

	return 0;
}
--
2.43.0



2024-06-12 15:12:48

by Nhat Pham

Subject: Re: [PATCH v4 2/2] mm: remove code to handle same filled pages

On Wed, Jun 12, 2024 at 5:47 AM Usama Arif <[email protected]> wrote:
>
> With an earlier commit to handle zero-filled pages in swap directly,
> and with only 1% of the same-filled pages being non-zero, zswap no
> longer needs to handle same-filled pages and can just work on compressed
> pages.
>
> Signed-off-by: Usama Arif <[email protected]>
> ---
> mm/zswap.c | 86 +++++-------------------------------------------------
> 1 file changed, 8 insertions(+), 78 deletions(-)
>
> diff --git a/mm/zswap.c b/mm/zswap.c
> index b9b35ef86d9b..ca8df9c99abf 100644
> --- a/mm/zswap.c
> +++ b/mm/zswap.c
> @@ -44,8 +44,6 @@
> **********************************/
> /* The number of compressed pages currently stored in zswap */
> atomic_t zswap_stored_pages = ATOMIC_INIT(0);
> -/* The number of same-value filled pages currently stored in zswap */
> -static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);

Can we re-introduce this counter somewhere? I have found this counter
to be valuable in the past, and would love to keep it. For instance,
in a system where a lot of zero-filled pages are reclaimed, we might
see an increase in swap usage. Having this counter at hand will allow
us to trace the source of that swap usage more easily. I *suppose* you
can work it out by elimination (check zswap usage, check disk swap
usage, etc.), but a counter would be much more direct and
user-friendly.

Not *entirely* sure where to expose this though. Seems a bit specific
for vmstat. Maybe as a new debugfs directory?

This doesn't have to be done in this patch (or this series even) - but
please consider this for follow-up work at least :)
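
Something like the below is what I have in mind (a rough sketch only;
the "swap" debugfs directory, the swap_zero_filled_pages name, and the
hook point are all made up):

#include <linux/atomic.h>
#include <linux/debugfs.h>
#include <linux/init.h>

/* count of zero-filled pages currently recorded in the zeromap */
static atomic_t swap_zero_filled_pages = ATOMIC_INIT(0);

/* to be called wherever the swap code sets a zeromap bit */
static inline void swap_zeromap_count_inc(void)
{
	atomic_inc(&swap_zero_filled_pages);
}

static int __init swap_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("swap", NULL);

	/* expose the counter read-only, like zswap's stored_pages */
	debugfs_create_atomic_t("zero_filled_pages", 0444, dir,
				&swap_zero_filled_pages);
	return 0;
}
late_initcall(swap_debugfs_init);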

2024-06-12 16:35:05

by Usama Arif

Subject: Re: [PATCH v4 2/2] mm: remove code to handle same filled pages


On 12/06/2024 16:09, Nhat Pham wrote:
> On Wed, Jun 12, 2024 at 5:47 AM Usama Arif <[email protected]> wrote:
>> With an earlier commit to handle zero-filled pages in swap directly,
>> and with only 1% of the same-filled pages being non-zero, zswap no
>> longer needs to handle same-filled pages and can just work on compressed
>> pages.
>>
>> Signed-off-by: Usama Arif <[email protected]>
>> ---
>> mm/zswap.c | 86 +++++-------------------------------------------------
>> 1 file changed, 8 insertions(+), 78 deletions(-)
>>
>> diff --git a/mm/zswap.c b/mm/zswap.c
>> index b9b35ef86d9b..ca8df9c99abf 100644
>> --- a/mm/zswap.c
>> +++ b/mm/zswap.c
>> @@ -44,8 +44,6 @@
>> **********************************/
>> /* The number of compressed pages currently stored in zswap */
>> atomic_t zswap_stored_pages = ATOMIC_INIT(0);
>> -/* The number of same-value filled pages currently stored in zswap */
>> -static atomic_t zswap_same_filled_pages = ATOMIC_INIT(0);
> Can we re-introduce this counter somewhere? I have found this counter
> to be valuable in the past, and would love to keep it. For instance,
> in a system where a lot of zero-filled pages are reclaimed, we might
> see an increase in swap usage. Having this counter at hand will allow
> us to trace the source of that swap usage more easily. I *suppose* you
> can work it out by elimination (check zswap usage, check disk swap
> usage, etc.), but a counter would be much more direct and
> user-friendly.
>
> Not *entirely* sure where to expose this though. Seems a bit specific
> for vmstat. Maybe as a new debugfs directory?
>
> This doesn't have to be done in this patch (or this series even) - but
> please consider this for follow-up work at least :)

Yes, it would be good to have this. There are two things to consider:
where to put it in debugfs, as you pointed out, and where to decrement
it. Decrementing will require a test-and-clear of the zeromap bit
rather than a plain clear in several places, so it is a bit more
involved than how zswap_same_filled_pages was tracked. I think it is
best to keep this separate from this series.
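
For example, on the swap-in path something like the following would be
needed wherever the zeromap bit is currently just cleared (a rough
sketch, assuming a counter like the one you suggested; the helper name
is made up):

static inline void swap_zeromap_clear(struct swap_info_struct *sis,
				      pgoff_t offset)
{
	/*
	 * test_and_clear_bit() rather than clear_bit(), so the counter
	 * is only decremented when the bit was actually set.
	 */
	if (test_and_clear_bit(offset, sis->zeromap))
		atomic_dec(&swap_zero_filled_pages);
}

and similar care would be needed anywhere the zeromap is cleared in
bulk (e.g. when a range of swap entries is freed), which is why I think
it is better done as a follow-up.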