2024-06-12 12:49:12

by Usama Arif

[permalink] [raw]
Subject: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap

Approximately 10-20% of pages to be swapped out are zero pages [1].
Rather than reading/writing these pages to flash resulting
in increased I/O and flash wear, a bitmap can be used to mark these
pages as zero at write time, and the pages can be filled at
read time if the bit corresponding to the page is set.
With this patch, NVMe writes in Meta server fleet decreased
by almost 10% with conventional swap setup (zswap disabled).

[1] https://lore.kernel.org/all/20171018104832epcms5p1b2232e2236258de3d03d1344dde9fce0@epcms5p1/

Signed-off-by: Usama Arif <[email protected]>
---
include/linux/swap.h | 1 +
mm/page_io.c | 114 ++++++++++++++++++++++++++++++++++++++++++-
mm/swapfile.c | 24 ++++++++-
3 files changed, 136 insertions(+), 3 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index a11c75e897ec..e88563978441 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -299,6 +299,7 @@ struct swap_info_struct {
signed char type; /* strange name for an index */
unsigned int max; /* extent of the swap_map */
unsigned char *swap_map; /* vmalloc'ed array of usage counts */
+ unsigned long *zeromap; /* vmalloc'ed bitmap to track zero pages */
struct swap_cluster_info *cluster_info; /* cluster info. Only for SSD */
struct swap_cluster_list free_clusters; /* free clusters list */
unsigned int lowest_bit; /* index of first free in swap_map */
diff --git a/mm/page_io.c b/mm/page_io.c
index a360857cf75d..39fc3919ce15 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -172,6 +172,88 @@ int generic_swapfile_activate(struct swap_info_struct *sis,
goto out;
}

+static bool is_folio_page_zero_filled(struct folio *folio, int i)
+{
+ unsigned long *data;
+ unsigned int pos, last_pos = PAGE_SIZE / sizeof(*data) - 1;
+ bool ret = false;
+
+ data = kmap_local_folio(folio, i * PAGE_SIZE);
+ if (data[last_pos])
+ goto out;
+ for (pos = 0; pos < PAGE_SIZE / sizeof(*data); pos++) {
+ if (data[pos])
+ goto out;
+ }
+ ret = true;
+out:
+ kunmap_local(data);
+ return ret;
+}
+
+static bool is_folio_zero_filled(struct folio *folio)
+{
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ if (!is_folio_page_zero_filled(folio, i))
+ return false;
+ }
+ return true;
+}
+
+static void folio_zero_fill(struct folio *folio)
+{
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++)
+ clear_highpage(folio_page(folio, i));
+}
+
+static void swap_zeromap_folio_set(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ set_bit(swp_offset(entry), sis->zeromap);
+ }
+}
+
+static void swap_zeromap_folio_clear(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ clear_bit(swp_offset(entry), sis->zeromap);
+ }
+}
+
+/*
+ * Return the index of the first subpage which is not zero-filled
+ * according to swap_info_struct->zeromap.
+ * If all pages are zero-filled according to zeromap, it will return
+ * folio_nr_pages(folio).
+ */
+static unsigned int swap_zeromap_folio_test(struct folio *folio)
+{
+ struct swap_info_struct *sis = swp_swap_info(folio->swap);
+ swp_entry_t entry;
+ unsigned int i;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+ entry = page_swap_entry(folio_page(folio, i));
+ if (!test_bit(swp_offset(entry), sis->zeromap))
+ return i;
+ }
+ return i;
+}
+
/*
* We may have stale swap cache pages in memory: notice
* them here and get rid of the unnecessary final write.
@@ -195,6 +277,13 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
folio_unlock(folio);
return ret;
}
+
+ if (is_folio_zero_filled(folio)) {
+ swap_zeromap_folio_set(folio);
+ folio_unlock(folio);
+ return 0;
+ }
+ swap_zeromap_folio_clear(folio);
if (zswap_store(folio)) {
folio_start_writeback(folio);
folio_unlock(folio);
@@ -426,6 +515,26 @@ static void sio_read_complete(struct kiocb *iocb, long ret)
mempool_free(sio, sio_pool);
}

+static bool swap_read_folio_zeromap(struct folio *folio)
+{
+ unsigned int idx = swap_zeromap_folio_test(folio);
+
+ if (idx == 0)
+ return false;
+
+ /*
+ * Swapping in a large folio that is partially in the zeromap is not
+ * currently handled. Return true without marking the folio uptodate so
+ * that an IO error is emitted (e.g. do_swap_page() will sigbus).
+ */
+ if (WARN_ON_ONCE(idx < folio_nr_pages(folio)))
+ return true;
+
+ folio_zero_fill(folio);
+ folio_mark_uptodate(folio);
+ return true;
+}
+
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
struct swap_info_struct *sis = swp_swap_info(folio->swap);
@@ -515,8 +624,9 @@ void swap_read_folio(struct folio *folio, bool synchronous,
psi_memstall_enter(&pflags);
}
delayacct_swapin_start();
-
- if (zswap_load(folio)) {
+ if (swap_read_folio_zeromap(folio)) {
+ folio_unlock(folio);
+ } else if (zswap_load(folio)) {
folio_mark_uptodate(folio);
folio_unlock(folio);
} else if (data_race(sis->flags & SWP_FS_OPS)) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f1e559e216bd..48d8dca0b94b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -453,6 +453,8 @@ static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
unsigned int idx)
{
+ unsigned int i;
+
/*
* If scan_swap_map_slots() can't find a free cluster, it will check
* si->swap_map directly. To make sure the discarding cluster isn't
@@ -461,6 +463,13 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
*/
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
SWAP_MAP_BAD, SWAPFILE_CLUSTER);
+ /*
+ * zeromap can see updates from concurrent swap_writepage() and swap_read_folio()
+ * call on other slots, hence use atomic clear_bit for zeromap instead of the
+ * non-atomic bitmap_clear.
+ */
+ for (i = 0; i < SWAPFILE_CLUSTER; i++)
+ clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);

cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

@@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
struct swap_cluster_info *info, *ci;
- unsigned int idx;
+ unsigned int idx, i;

info = si->cluster_info;

@@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
__free_cluster(si, idx);
memset(si->swap_map + idx * SWAPFILE_CLUSTER,
0, SWAPFILE_CLUSTER);
+ for (i = 0; i < SWAPFILE_CLUSTER; i++)
+ clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
unlock_cluster(ci);
}
}
@@ -1059,9 +1070,12 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
unsigned long offset = idx * SWAPFILE_CLUSTER;
struct swap_cluster_info *ci;
+ unsigned int i;

ci = lock_cluster(si, offset);
memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
+ for (i = 0; i < SWAPFILE_CLUSTER; i++)
+ clear_bit(offset + i, si->zeromap);
cluster_set_count_flag(ci, 0, 0);
free_cluster(si, idx);
unlock_cluster(ci);
@@ -1336,6 +1350,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
count = p->swap_map[offset];
VM_BUG_ON(count != SWAP_HAS_CACHE);
p->swap_map[offset] = 0;
+ clear_bit(offset, p->zeromap);
dec_cluster_info_page(p, p->cluster_info, offset);
unlock_cluster(ci);

@@ -2597,6 +2612,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
free_percpu(p->cluster_next_cpu);
p->cluster_next_cpu = NULL;
vfree(swap_map);
+ bitmap_free(p->zeromap);
kvfree(cluster_info);
/* Destroy swap account information */
swap_cgroup_swapoff(p->type);
@@ -3123,6 +3139,12 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
goto bad_swap_unlock_inode;
}

+ p->zeromap = bitmap_zalloc(maxpages, GFP_KERNEL);
+ if (!p->zeromap) {
+ error = -ENOMEM;
+ goto bad_swap_unlock_inode;
+ }
+
if (p->bdev && bdev_stable_writes(p->bdev))
p->flags |= SWP_STABLE_WRITES;

--
2.43.0



2024-06-12 20:14:02

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap

On Wed, Jun 12, 2024 at 01:43:35PM +0100, Usama Arif wrote:
[..]

Hi Usama,

A few more comments/questions, sorry for not looking closely earlier.

> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index f1e559e216bd..48d8dca0b94b 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -453,6 +453,8 @@ static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
> static void swap_cluster_schedule_discard(struct swap_info_struct *si,
> unsigned int idx)
> {
> + unsigned int i;
> +
> /*
> * If scan_swap_map_slots() can't find a free cluster, it will check
> * si->swap_map directly. To make sure the discarding cluster isn't
> @@ -461,6 +463,13 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
> */
> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
> SWAP_MAP_BAD, SWAPFILE_CLUSTER);
> + /*
> + * zeromap can see updates from concurrent swap_writepage() and swap_read_folio()
> + * call on other slots, hence use atomic clear_bit for zeromap instead of the
> + * non-atomic bitmap_clear.
> + */

I don't think this is accurate. swap_read_folio() does not update the
zeromap. I think the need for an atomic operation here is because we may
be updating adjacent bits simultaneously, so we may cause lost updates
otherwise (i.e. corrupting adjacent bits).

> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);

Could you explain why we need to clear the zeromap here?

swap_cluster_schedule_discard() is called from:
- swap_free_cluster() -> free_cluster()

This is already covered below.

- swap_entry_free() -> dec_cluster_info_page() -> free_cluster()

Each entry in the cluster should have its zeromap bit cleared in
swap_entry_free() before the entire cluster is free and we call
free_cluster().

Am I missing something?

>
> cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
>
> @@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
> static void swap_do_scheduled_discard(struct swap_info_struct *si)
> {
> struct swap_cluster_info *info, *ci;
> - unsigned int idx;
> + unsigned int idx, i;
>
> info = si->cluster_info;
>
> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
> __free_cluster(si, idx);
> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
> 0, SWAPFILE_CLUSTER);
> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);

Same here. I didn't look into the specific code paths, but shouldn't the
cluster be unused (and hence its zeromap bits already cleared?).

> unlock_cluster(ci);
> }
> }
> @@ -1059,9 +1070,12 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
> {
> unsigned long offset = idx * SWAPFILE_CLUSTER;
> struct swap_cluster_info *ci;
> + unsigned int i;
>
> ci = lock_cluster(si, offset);
> memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> + clear_bit(offset + i, si->zeromap);
> cluster_set_count_flag(ci, 0, 0);
> free_cluster(si, idx);
> unlock_cluster(ci);
> @@ -1336,6 +1350,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
> count = p->swap_map[offset];
> VM_BUG_ON(count != SWAP_HAS_CACHE);
> p->swap_map[offset] = 0;
> + clear_bit(offset, p->zeromap);

I think instead of clearing the zeromap in swap_free_cluster() and here
separately, we can just do it in swap_range_free(). I suspect this may
be the only place we really need to clear the zero in the swapfile code.

> dec_cluster_info_page(p, p->cluster_info, offset);
> unlock_cluster(ci);
>
> @@ -2597,6 +2612,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
> free_percpu(p->cluster_next_cpu);
> p->cluster_next_cpu = NULL;
> vfree(swap_map);
> + bitmap_free(p->zeromap);
> kvfree(cluster_info);
> /* Destroy swap account information */
> swap_cgroup_swapoff(p->type);
> @@ -3123,6 +3139,12 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
> goto bad_swap_unlock_inode;
> }
>
> + p->zeromap = bitmap_zalloc(maxpages, GFP_KERNEL);
> + if (!p->zeromap) {
> + error = -ENOMEM;
> + goto bad_swap_unlock_inode;
> + }
> +
> if (p->bdev && bdev_stable_writes(p->bdev))
> p->flags |= SWP_STABLE_WRITES;
>
> --
> 2.43.0
>

2024-06-13 11:37:55

by Usama Arif

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap


On 12/06/2024 21:13, Yosry Ahmed wrote:
> On Wed, Jun 12, 2024 at 01:43:35PM +0100, Usama Arif wrote:
> [..]
>
> Hi Usama,
>
> A few more comments/questions, sorry for not looking closely earlier.
No worries, Thanks for the reviews!
>> diff --git a/mm/swapfile.c b/mm/swapfile.c
>> index f1e559e216bd..48d8dca0b94b 100644
>> --- a/mm/swapfile.c
>> +++ b/mm/swapfile.c
>> @@ -453,6 +453,8 @@ static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
>> static void swap_cluster_schedule_discard(struct swap_info_struct *si,
>> unsigned int idx)
>> {
>> + unsigned int i;
>> +
>> /*
>> * If scan_swap_map_slots() can't find a free cluster, it will check
>> * si->swap_map directly. To make sure the discarding cluster isn't
>> @@ -461,6 +463,13 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
>> */
>> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
>> SWAP_MAP_BAD, SWAPFILE_CLUSTER);
>> + /*
>> + * zeromap can see updates from concurrent swap_writepage() and swap_read_folio()
>> + * call on other slots, hence use atomic clear_bit for zeromap instead of the
>> + * non-atomic bitmap_clear.
>> + */
> I don't think this is accurate. swap_read_folio() does not update the
> zeromap. I think the need for an atomic operation here is because we may
> be updating adjacent bits simulatenously, so we may cause lost updates
> otherwise (i.e. corrupting adjacent bits).


Thanks, will change to "Use atomic clear_bit instead of non-atomic
bitmap_clear to prevent adjacent bits corruption due to simultaneous
writes." in the next revision

>
>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
> Could you explain why we need to clear the zeromap here?
>
> swap_cluster_schedule_discard() is called from:
> - swap_free_cluster() -> free_cluster()
>
> This is already covered below.
>
> - swap_entry_free() -> dec_cluster_info_page() -> free_cluster()
>
> Each entry in the cluster should have its zeromap bit cleared in
> swap_entry_free() before the entire cluster is free and we call
> free_cluster().
>
> Am I missing something?

Yes, it looks like this one is not needed as swap_entry_free and
swap_free_cluster would already have cleared the bit. Will remove it.

I had initially started checking what codepaths zeromap would need to be
cleared. But then thought I could do it wherever si->swap_map is cleared
or set to SWAP_MAP_BAD, which is why I added it here.

>>
>> cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
>>
>> @@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
>> static void swap_do_scheduled_discard(struct swap_info_struct *si)
>> {
>> struct swap_cluster_info *info, *ci;
>> - unsigned int idx;
>> + unsigned int idx, i;
>>
>> info = si->cluster_info;
>>
>> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
>> __free_cluster(si, idx);
>> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
>> 0, SWAPFILE_CLUSTER);
>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
> Same here. I didn't look into the specific code paths, but shouldn't the
> cluster be unused (and hence its zeromap bits already cleared?).
>
I think this one is needed (or at least very good to have). There are 2
paths:

1) swap_cluster_schedule_discard (clears zeromap) -> swap_discard_work
-> swap_do_scheduled_discard (clears zeromap)

Path 1 doesn't need it as swap_cluster_schedule_discard already clears it.

2) scan_swap_map_slots -> scan_swap_map_try_ssd_cluster ->
swap_do_scheduled_discard (clears zeromap)

Path 2 might need it as zeromap isn't cleared earlier I believe
(even though I think it might already be 0).

Even if it's cleared in path 2, I think it's good to keep this one, as the
function is swap_do_scheduled_discard, i.e. in case it gets directly
called or si->discard_work gets scheduled anywhere else in the future,
it should do as the function name suggests, i.e. swap discard (clear
zeromap).

>> unlock_cluster(ci);
>> }
>> }
>> @@ -1059,9 +1070,12 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
>> {
>> unsigned long offset = idx * SWAPFILE_CLUSTER;
>> struct swap_cluster_info *ci;
>> + unsigned int i;
>>
>> ci = lock_cluster(si, offset);
>> memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>> + clear_bit(offset + i, si->zeromap);
>> cluster_set_count_flag(ci, 0, 0);
>> free_cluster(si, idx);
>> unlock_cluster(ci);
>> @@ -1336,6 +1350,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
>> count = p->swap_map[offset];
>> VM_BUG_ON(count != SWAP_HAS_CACHE);
>> p->swap_map[offset] = 0;
>> + clear_bit(offset, p->zeromap);
> I think instead of clearing the zeromap in swap_free_cluster() and here
> separately, we can just do it in swap_range_free(). I suspect this may
> be the only place we really need to clear the zero in the swapfile code.

Sure, we could move it to swap_range_free, but then also move the
clearing of swap_map.

When it comes to clearing zeromap, I think it's just generally a good
idea to clear it wherever swap_map is cleared.

So the diff over v4 looks like below (should address all comments but
not remove it from swap_do_scheduled_discard, and
move si->swap_map/zeromap clearing from
swap_free_cluster/swap_entry_free to swap_range_free):



diff --git a/mm/swapfile.c b/mm/swapfile.c
index 48d8dca0b94b..39cad0d09525 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -463,13 +463,6 @@ static void swap_cluster_schedule_discard(struct
swap_info_struct *si,
         */
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
-       /*
-        * zeromap can see updates from concurrent swap_writepage() and
swap_read_folio()
-        * call on other slots, hence use atomic clear_bit for zeromap
instead of the
-        * non-atomic bitmap_clear.
-        */
-       for (i = 0; i < SWAPFILE_CLUSTER; i++)
-               clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);

        cluster_list_add_tail(&si->discard_clusters, si->cluster_info,
idx);

@@ -758,6 +751,15 @@ static void swap_range_free(struct swap_info_struct
*si, unsigned long offset,
        unsigned long begin = offset;
        unsigned long end = offset + nr_entries - 1;
        void (*swap_slot_free_notify)(struct block_device *, unsigned
long);
+       unsigned int i;
+
+       memset(si->swap_map + offset, 0, nr_entries);
+       /*
+        * Use atomic clear_bit operations only on zeromap instead of
non-atomic
+        * bitmap_clear to prevent adjacent bits corruption due to
simultaneous writes.
+        */
+       for (i = 0; i < nr_entries; i++)
+               clear_bit(offset + i, si->zeromap);

        if (offset < si->lowest_bit)
                si->lowest_bit = offset;
@@ -1070,12 +1072,8 @@ static void swap_free_cluster(struct
swap_info_struct *si, unsigned long idx)
 {
        unsigned long offset = idx * SWAPFILE_CLUSTER;
        struct swap_cluster_info *ci;
-       unsigned int i;

        ci = lock_cluster(si, offset);
-       memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
-       for (i = 0; i < SWAPFILE_CLUSTER; i++)
-               clear_bit(offset + i, si->zeromap);
        cluster_set_count_flag(ci, 0, 0);
        free_cluster(si, idx);
        unlock_cluster(ci);
@@ -1349,8 +1347,6 @@ static void swap_entry_free(struct
swap_info_struct *p, swp_entry_t entry)
        ci = lock_cluster(p, offset);
        count = p->swap_map[offset];
        VM_BUG_ON(count != SWAP_HAS_CACHE);
-       p->swap_map[offset] = 0;
-       clear_bit(offset, p->zeromap);
        dec_cluster_info_page(p, p->cluster_info, offset);
        unlock_cluster(ci);




2024-06-13 16:39:14

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap

[..]
>
> >
> >> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> >> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
> > Could you explain why we need to clear the zeromap here?
> >
> > swap_cluster_schedule_discard() is called from:
> > - swap_free_cluster() -> free_cluster()
> >
> > This is already covered below.
> >
> > - swap_entry_free() -> dec_cluster_info_page() -> free_cluster()
> >
> > Each entry in the cluster should have its zeromap bit cleared in
> > swap_entry_free() before the entire cluster is free and we call
> > free_cluster().
> >
> > Am I missing something?
>
> Yes, it looks like this one is not needed as swap_entry_free and
> swap_free_cluster would already have cleared the bit. Will remove it.
>
> I had initially started checking what codepaths zeromap would need to be
> cleared. But then thought I could do it wherever si->swap_map is cleared
> or set to SWAP_MAP_BAD, which is why I added it here.
>
> >>
> >> cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
> >>
> >> @@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
> >> static void swap_do_scheduled_discard(struct swap_info_struct *si)
> >> {
> >> struct swap_cluster_info *info, *ci;
> >> - unsigned int idx;
> >> + unsigned int idx, i;
> >>
> >> info = si->cluster_info;
> >>
> >> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
> >> __free_cluster(si, idx);
> >> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
> >> 0, SWAPFILE_CLUSTER);
> >> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> >> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
> > Same here. I didn't look into the specific code paths, but shouldn't the
> > cluster be unused (and hence its zeromap bits already cleared?).
> >
> I think this one is needed (or atleast very good to have). There are 2
> paths:
>
> 1) swap_cluster_schedule_discard (clears zeromap) -> swap_discard_work
> -> swap_do_scheduled_discard (clears zeromap)
>
> Path 1 doesnt need it as swap_cluster_schedule_discard already clears it.
>
> 2) scan_swap_map_slots -> scan_swap_map_try_ssd_cluster ->
> swap_do_scheduled_discard (clears zeromap)
>
> Path 2 might need it as zeromap isnt cleared earlier I believe
> (eventhough I think it might already be 0).

Aren't the clusters in the discard list free by definition? It seems
like we add a cluster there from swap_cluster_schedule_discard(),
which we establish above that it gets called on a free cluster, right?

>
> Even if its cleared in path 2, I think its good to keep this one, as the
> function is swap_do_scheduled_discard, i.e. incase it gets directly
> called or si->discard_work gets scheduled anywhere else in the future,
> it should do as the function name suggests, i.e. swap discard(clear
> zeromap).

I think we just set the swap map to SWAP_MAP_BAD in
swap_cluster_schedule_discard() and then clear it in
swap_do_scheduled_discard(), and the clusters are already freed at
that point. Ying could set me straight if I am wrong here.

It is confusing to me to keep an unnecessary call tbh, it makes sense
to clear zeromap bits once, when the swap entry/cluster is not being
used anymore and before it's reallocated.

>
> >> unlock_cluster(ci);
> >> }
> >> }
> >> @@ -1059,9 +1070,12 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
> >> {
> >> unsigned long offset = idx * SWAPFILE_CLUSTER;
> >> struct swap_cluster_info *ci;
> >> + unsigned int i;
> >>
> >> ci = lock_cluster(si, offset);
> >> memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
> >> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> >> + clear_bit(offset + i, si->zeromap);
> >> cluster_set_count_flag(ci, 0, 0);
> >> free_cluster(si, idx);
> >> unlock_cluster(ci);
> >> @@ -1336,6 +1350,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
> >> count = p->swap_map[offset];
> >> VM_BUG_ON(count != SWAP_HAS_CACHE);
> >> p->swap_map[offset] = 0;
> >> + clear_bit(offset, p->zeromap);
> > I think instead of clearing the zeromap in swap_free_cluster() and here
> > separately, we can just do it in swap_range_free(). I suspect this may
> > be the only place we really need to clear the zero in the swapfile code.
>
> Sure, we could move it to swap_range_free, but then also move the
> clearing of swap_map.
>
> When it comes to clearing zeromap, I think its just generally a good
> idea to clear it wherever swap_map is cleared.

I am not convinced about this argument. The swap_map is used for
multiple reasons beyond just keeping track of whether a swap entry is
in-use. The zeromap on the other hand is simpler and just needs to be
cleared once when an entry is being freed.

Unless others disagree, I prefer to only clear the zeromap once in
swap_range_free() and keep the swap_map code as-is for now. If we
think there is value in moving clearing the swap_map to
swap_range_free(), it should at least be in a separate patch to be
evaluated separately.

Just my 2c.

2024-06-13 19:30:34

by Yosry Ahmed

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap

[..]
> >>>> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
> >>>> __free_cluster(si, idx);
> >>>> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
> >>>> 0, SWAPFILE_CLUSTER);
> >>>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
> >>>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
> >>> Same here. I didn't look into the specific code paths, but shouldn't the
> >>> cluster be unused (and hence its zeromap bits already cleared?).
> >>>
> >> I think this one is needed (or atleast very good to have). There are 2
> >> paths:
> >>
> >> 1) swap_cluster_schedule_discard (clears zeromap) -> swap_discard_work
> >> -> swap_do_scheduled_discard (clears zeromap)
> >>
> >> Path 1 doesnt need it as swap_cluster_schedule_discard already clears it.
> >>
> >> 2) scan_swap_map_slots -> scan_swap_map_try_ssd_cluster ->
> >> swap_do_scheduled_discard (clears zeromap)
> >>
> >> Path 2 might need it as zeromap isnt cleared earlier I believe
> >> (eventhough I think it might already be 0).
> > Aren't the clusters in the discard list free by definition? It seems
> > like we add a cluster there from swap_cluster_schedule_discard(),
> > which we establish above that it gets called on a free cluster, right?
>
> You mean for path 2? Its not from swap_cluster_schedule_discard. The
> whole call path is
>
> get_swap_pages -> scan_swap_map_slots -> scan_swap_map_try_ssd_cluster
> -> swap_do_scheduled_discard. Nowhere up until swap_do_scheduled_discard
> was the zeromap cleared, which is why I think we should add it here.

swap_do_scheduled_discard() iterates over clusters from
si->discard_clusters. Clusters are added to that list from
swap_cluster_schedule_discard().

IOW, swap_cluster_schedule_discard() schedules freed clusters to be
discarded, and swap_do_scheduled_discard() later does the actual
discarding, whether it's through si->discard_work scheduled by
swap_cluster_schedule_discard(), or when looking for a free cluster
through scan_swap_map_try_ssd_cluster().

Did I miss anything?

2024-06-13 19:39:22

by Usama Arif

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap


On 13/06/2024 20:26, Yosry Ahmed wrote:
> [..]
>>>>>> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
>>>>>> __free_cluster(si, idx);
>>>>>> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
>>>>>> 0, SWAPFILE_CLUSTER);
>>>>>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>>>>>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
>>>>> Same here. I didn't look into the specific code paths, but shouldn't the
>>>>> cluster be unused (and hence its zeromap bits already cleared?).
>>>>>
>>>> I think this one is needed (or atleast very good to have). There are 2
>>>> paths:
>>>>
>>>> 1) swap_cluster_schedule_discard (clears zeromap) -> swap_discard_work
>>>> -> swap_do_scheduled_discard (clears zeromap)
>>>>
>>>> Path 1 doesnt need it as swap_cluster_schedule_discard already clears it.
>>>>
>>>> 2) scan_swap_map_slots -> scan_swap_map_try_ssd_cluster ->
>>>> swap_do_scheduled_discard (clears zeromap)
>>>>
>>>> Path 2 might need it as zeromap isnt cleared earlier I believe
>>>> (eventhough I think it might already be 0).
>>> Aren't the clusters in the discard list free by definition? It seems
>>> like we add a cluster there from swap_cluster_schedule_discard(),
>>> which we establish above that it gets called on a free cluster, right?
>> You mean for path 2? Its not from swap_cluster_schedule_discard. The
>> whole call path is
>>
>> get_swap_pages -> scan_swap_map_slots -> scan_swap_map_try_ssd_cluster
>> -> swap_do_scheduled_discard. Nowhere up until swap_do_scheduled_discard
>> was the zeromap cleared, which is why I think we should add it here.
> swap_do_scheduled_discard() iterates over clusters from
> si->discard_clusters. Clusters are added to that list from
> swap_cluster_schedule_discard().
>
> IOW, swap_cluster_schedule_discard() schedules freed clusters to be
> discarded, and swap_do_scheduled_discard() later does the actual
> discarding, whether it's through si->discard_work scheduled by
> swap_cluster_schedule_discard(), or when looking for a free cluster
> through scan_swap_map_try_ssd_cluster().
>
> Did I miss anything?

Ah ok, and the schedule_discard in free_cluster won't be scheduled
before swap_range_free. Will only keep the one in swap_range_free. Thanks!



2024-06-13 19:41:21

by Usama Arif

[permalink] [raw]
Subject: Re: [PATCH v4 1/2] mm: store zero pages to be swapped out in a bitmap


On 13/06/2024 17:38, Yosry Ahmed wrote:
> [..]
>>>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>>>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
>>> Could you explain why we need to clear the zeromap here?
>>>
>>> swap_cluster_schedule_discard() is called from:
>>> - swap_free_cluster() -> free_cluster()
>>>
>>> This is already covered below.
>>>
>>> - swap_entry_free() -> dec_cluster_info_page() -> free_cluster()
>>>
>>> Each entry in the cluster should have its zeromap bit cleared in
>>> swap_entry_free() before the entire cluster is free and we call
>>> free_cluster().
>>>
>>> Am I missing something?
>> Yes, it looks like this one is not needed as swap_entry_free and
>> swap_free_cluster would already have cleared the bit. Will remove it.
>>
>> I had initially started checking what codepaths zeromap would need to be
>> cleared. But then thought I could do it wherever si->swap_map is cleared
>> or set to SWAP_MAP_BAD, which is why I added it here.
>>
>>>> cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
>>>>
>>>> @@ -482,7 +491,7 @@ static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
>>>> static void swap_do_scheduled_discard(struct swap_info_struct *si)
>>>> {
>>>> struct swap_cluster_info *info, *ci;
>>>> - unsigned int idx;
>>>> + unsigned int idx, i;
>>>>
>>>> info = si->cluster_info;
>>>>
>>>> @@ -498,6 +507,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si)
>>>> __free_cluster(si, idx);
>>>> memset(si->swap_map + idx * SWAPFILE_CLUSTER,
>>>> 0, SWAPFILE_CLUSTER);
>>>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>>>> + clear_bit(idx * SWAPFILE_CLUSTER + i, si->zeromap);
>>> Same here. I didn't look into the specific code paths, but shouldn't the
>>> cluster be unused (and hence its zeromap bits already cleared?).
>>>
>> I think this one is needed (or at least very good to have). There are 2
>> paths:
>>
>> 1) swap_cluster_schedule_discard (clears zeromap) -> swap_discard_work
>> -> swap_do_scheduled_discard (clears zeromap)
>>
>> Path 1 doesn't need it as swap_cluster_schedule_discard already clears it.
>>
>> 2) scan_swap_map_slots -> scan_swap_map_try_ssd_cluster ->
>> swap_do_scheduled_discard (clears zeromap)
>>
>> Path 2 might need it as zeromap isn't cleared earlier I believe
>> (even though I think it might already be 0).
> Aren't the clusters in the discard list free by definition? It seems
> like we add a cluster there from swap_cluster_schedule_discard(),
> which we establish above that it gets called on a free cluster, right?

You mean for path 2? It's not called from swap_cluster_schedule_discard. The
whole call path is

get_swap_pages -> scan_swap_map_slots -> scan_swap_map_try_ssd_cluster
-> swap_do_scheduled_discard. Nowhere up until swap_do_scheduled_discard
was the zeromap cleared, which is why I think we should add it here.


>> Even if it's cleared in path 2, I think it's good to keep this one, as the
>> function is swap_do_scheduled_discard, i.e. in case it gets directly
>> called or si->discard_work gets scheduled anywhere else in the future,
>> it should do as the function name suggests, i.e. swap discard (clear
>> zeromap).
> I think we just set the swap map to SWAP_MAP_BAD in
> swap_cluster_schedule_discard() and then clear it in
> swap_do_scheduled_discard(), and the clusters are already freed at
> that point. Ying could set me straight if I am wrong here.
I think you might be mixing up path 1 and path 2 above?
swap_cluster_schedule_discard is not called in Path 2 where
swap_do_scheduled_discard ends up being called, which is why I think we
would need to clear the zeromap here.
> It is confusing to me to keep an unnecessary call tbh, it makes sense
> to clear zeromap bits once, when the swap entry/cluster is not being
> used anymore and before it's reallocated.
>
>>>> unlock_cluster(ci);
>>>> }
>>>> }
>>>> @@ -1059,9 +1070,12 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
>>>> {
>>>> unsigned long offset = idx * SWAPFILE_CLUSTER;
>>>> struct swap_cluster_info *ci;
>>>> + unsigned int i;
>>>>
>>>> ci = lock_cluster(si, offset);
>>>> memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
>>>> + for (i = 0; i < SWAPFILE_CLUSTER; i++)
>>>> + clear_bit(offset + i, si->zeromap);
>>>> cluster_set_count_flag(ci, 0, 0);
>>>> free_cluster(si, idx);
>>>> unlock_cluster(ci);
>>>> @@ -1336,6 +1350,7 @@ static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
>>>> count = p->swap_map[offset];
>>>> VM_BUG_ON(count != SWAP_HAS_CACHE);
>>>> p->swap_map[offset] = 0;
>>>> + clear_bit(offset, p->zeromap);
>>> I think instead of clearing the zeromap in swap_free_cluster() and here
>>> separately, we can just do it in swap_range_free(). I suspect this may
>>> be the only place we really need to clear the zero in the swapfile code.
>> Sure, we could move it to swap_range_free, but then also move the
>> clearing of swap_map.
>>
>> When it comes to clearing zeromap, I think its just generally a good
>> idea to clear it wherever swap_map is cleared.
> I am not convinced about this argument. The swap_map is used for
> multiple reasons beyond just keeping track of whether a swap entry is
> in-use. The zeromap on the other hand is simpler and just needs to be
> cleared once when an entry is being freed.
>
> Unless others disagree, I prefer to only clear the zeromap once in
> swap_range_free() and keep the swap_map code as-is for now. If we
> think there is value in moving clearing the swap_map to
> swap_range_free(), it should at least be in a separate patch to be
> evaluated separately.
>
> Just my 2c.

Sure, I am indifferent to this. I don't think it makes a difference if
the zeromap is cleared in swap_free_cluster + swap_entry_free or later
on in a common swap_range_free function, so will just move it in the
next revision. Won't move the swap_map clearing code.