These patches aim to simplify zswap_swapoff() by removing the
unnecessary trees cleanup code. Patch 1 makes sure that the order of
operations during swapoff is enforced correctly, so that the
simplification in patch 2 is correct in a future-proof manner.
This is based on mm-unstable and v2 of the "mm/zswap: optimize the
scalability of zswap rb-tree" series [1].
[1] https://lore.kernel.org/lkml/[email protected]/
Yosry Ahmed (2):
mm: swap: enforce updating inuse_pages at the end of swap_range_free()
mm: zswap: remove unnecessary trees cleanups in zswap_swapoff()
mm/swapfile.c | 18 +++++++++++++++---
mm/zswap.c | 16 +++-------------
2 files changed, 18 insertions(+), 16 deletions(-)
--
2.43.0.429.g432eaa2c6b-goog
In swap_range_free(), we update inuse_pages then do some cleanups (arch
invalidation, zswap invalidation, swap cache cleanups, etc). During
swapoff, try_to_unuse() checks that inuse_pages is 0 to make sure all
swap entries are freed. Make sure we only update inuse_pages after we
are done with the cleanups in swap_range_free(), and use the proper
memory barriers to enforce it. This makes sure that code following
try_to_unuse() can safely assume that swap_range_free() ran for all
entries in the swapfile (e.g. swap cache cleanup, zswap_swapoff()).
In practice, this currently isn't a problem because swap_range_free() is
called with the swap info lock held, and the swapoff code happens to
spin for that after try_to_unuse(). However, this seems fragile and
unintentional, so make it more reliable and future-proof. This also
facilitates a following simplification of zswap_swapoff().
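For illustration, the intended barrier pairing boils down to the
following simplified sketch (not the literal code; cleanup_swap_entries()
is just a placeholder for the arch/zswap/swap cache cleanups mentioned
above):

	/* Writer side, swap_range_free(), simplified: */
	cleanup_swap_entries();	/* arch/zswap/swap cache cleanups */
	smp_wmb();		/* order the cleanups before the store below */
	WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);

	/* Reader side, try_to_unuse(), simplified: */
	if (!READ_ONCE(si->inuse_pages)) {
		smp_mb();	/* pairs with the smp_wmb() above */
		/* safe to assume all cleanups have completed */
	}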
Signed-off-by: Yosry Ahmed <[email protected]>
---
mm/swapfile.c | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index b11b6057d8b5f..0580bb3e34d77 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -737,8 +737,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
if (was_full && (si->flags & SWP_WRITEOK))
add_to_avail_list(si);
}
- atomic_long_add(nr_entries, &nr_swap_pages);
- WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
if (si->flags & SWP_BLKDEV)
swap_slot_free_notify =
si->bdev->bd_disk->fops->swap_slot_free_notify;
@@ -752,6 +750,14 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
offset++;
}
clear_shadow_from_swap_cache(si->type, begin, end);
+
+ /*
+ * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
+ * only after the above cleanups are done.
+ */
+ smp_wmb();
+ atomic_long_add(nr_entries, &nr_swap_pages);
+ WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
}
static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
@@ -2049,7 +2055,7 @@ static int try_to_unuse(unsigned int type)
unsigned int i;
if (!READ_ONCE(si->inuse_pages))
- return 0;
+ goto success;
retry:
retval = shmem_unuse(type);
@@ -2130,6 +2136,12 @@ static int try_to_unuse(unsigned int type)
return -EINTR;
}
+success:
+ /*
+ * Make sure that further cleanups after try_to_unuse() returns happen
+ * after swap_range_free() reduces si->inuse_pages to 0.
+ */
+ smp_mb();
return 0;
}
--
2.43.0.429.g432eaa2c6b-goog
Yosry Ahmed <[email protected]> writes:
> In swap_range_free(), we update inuse_pages then do some cleanups (arch
> invalidation, zswap invalidation, swap cache cleanups, etc). During
> swapoff, try_to_unuse() checks that inuse_pages is 0 to make sure all
> swap entries are freed. Make sure we only update inuse_pages after we
> are done with the cleanups in swap_range_free(), and use the proper
> memory barriers to enforce it. This makes sure that code following
> try_to_unuse() can safely assume that swap_range_free() ran for all
> entries in the swapfile (e.g. swap cache cleanup, zswap_swapoff()).
>
> In practice, this currently isn't a problem because swap_range_free() is
> called with the swap info lock held, and the swapoff code happens to
> spin for that after try_to_unuse(). However, this seems fragile and
> unintentional, so make it more reliable and future-proof. This also
> facilitates a following simplification of zswap_swapoff().
>
> Signed-off-by: Yosry Ahmed <[email protected]>
LGTM, Thanks!
Reviewed-by: "Huang, Ying" <[email protected]>