From: Baolin Wang
Date: 2024-05-21 11:03:56

Subject: [RFC PATCH 3/8] mm: swap: extend swap_shmem_alloc() to support batch SWAP_MAP_SHMEM flag setting

To support shmem large folio swap operations, add an 'nr' parameter to
swap_shmem_alloc() so that the SWAP_MAP_SHMEM flag can be set on a batch
of contiguous shmem swap entries in one call.

While we are at it, use folio_nr_pages() to get the number of pages in
the folio in shmem_writepage(), as a preparation for the batched flag
setting.
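
For a large folio, the call site thus goes from marking a single swap
entry to marking all of the folio's entries at once. A minimal sketch of
the caller, mirroring the shmem_writepage() hunk below (the 64K figure
assumes a 4K base page size):

	int nr_pages = folio_nr_pages(folio);	/* e.g. 16 for a 64K folio */

	/* Before: only one swap entry was marked SWAP_MAP_SHMEM */
	swap_shmem_alloc(swap);

	/* After: mark all nr_pages contiguous entries in one call */
	swap_shmem_alloc(swap, nr_pages);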

Signed-off-by: Baolin Wang <[email protected]>
---
 include/linux/swap.h |  4 +-
 mm/shmem.c           |  6 ++-
 mm/swapfile.c        | 98 +++++++++++++++++++++++---------------------
 3 files changed, 57 insertions(+), 51 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 48131b869a4d..78922922abbd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -479,7 +479,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
-extern void swap_shmem_alloc(swp_entry_t);
+extern void swap_shmem_alloc(swp_entry_t, int);
extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t);
extern void swap_free_nr(swp_entry_t entry, int nr_pages);
@@ -546,7 +546,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
return 0;
}

-static inline void swap_shmem_alloc(swp_entry_t swp)
+static inline void swap_shmem_alloc(swp_entry_t swp, int nr)
{
}

diff --git a/mm/shmem.c b/mm/shmem.c
index fd2cb2e73a21..daab124c3e61 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1433,6 +1433,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
swp_entry_t swap;
pgoff_t index;
+ int nr_pages;

/*
* Our capabilities prevent regular writeback or sync from ever calling
@@ -1465,6 +1466,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
}

index = folio->index;
+ nr_pages = folio_nr_pages(folio);

/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
@@ -1517,8 +1519,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (add_to_swap_cache(folio, swap,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
NULL) == 0) {
- shmem_recalc_inode(inode, 0, 1);
- swap_shmem_alloc(swap);
+ shmem_recalc_inode(inode, 0, nr_pages);
+ swap_shmem_alloc(swap, nr_pages);
shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));

mutex_unlock(&shmem_swaplist_mutex);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 99e701620562..2f23b87ddcb3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3387,62 +3387,58 @@ void si_swapinfo(struct sysinfo *val)
* - swap-cache reference is requested but the entry is not used. -> ENOENT
* - swap-mapped reference requested but needs continued swap count. -> ENOMEM
*/
-static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
+static int __swap_duplicate(struct swap_info_struct *p, unsigned long offset,
+ int nr, unsigned char usage)
{
- struct swap_info_struct *p;
struct swap_cluster_info *ci;
- unsigned long offset;
unsigned char count;
unsigned char has_cache;
- int err;
+ int err, i;

- p = swp_swap_info(entry);
-
- offset = swp_offset(entry);
ci = lock_cluster_or_swap_info(p, offset);

- count = p->swap_map[offset];
-
- /*
- * swapin_readahead() doesn't check if a swap entry is valid, so the
- * swap entry could be SWAP_MAP_BAD. Check here with lock held.
- */
- if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
- err = -ENOENT;
- goto unlock_out;
- }
-
- has_cache = count & SWAP_HAS_CACHE;
- count &= ~SWAP_HAS_CACHE;
- err = 0;
-
- if (usage == SWAP_HAS_CACHE) {
+ for (i = 0; i < nr; i++) {
+ count = p->swap_map[offset + i];

- /* set SWAP_HAS_CACHE if there is no cache and entry is used */
- if (!has_cache && count)
- has_cache = SWAP_HAS_CACHE;
- else if (has_cache) /* someone else added cache */
- err = -EEXIST;
- else /* no users remaining */
+ /*
+ * swapin_readahead() doesn't check if a swap entry is valid, so the
+ * swap entry could be SWAP_MAP_BAD. Check here with lock held.
+ */
+ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
err = -ENOENT;
+ break;
+ }

- } else if (count || has_cache) {
+ has_cache = count & SWAP_HAS_CACHE;
+ count &= ~SWAP_HAS_CACHE;
+ err = 0;
+
+ if (usage == SWAP_HAS_CACHE) {
+ /* set SWAP_HAS_CACHE if there is no cache and entry is used */
+ if (!has_cache && count)
+ has_cache = SWAP_HAS_CACHE;
+ else if (has_cache) /* someone else added cache */
+ err = -EEXIST;
+ else /* no users remaining */
+ err = -ENOENT;
+ } else if (count || has_cache) {
+ if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
+ count += usage;
+ else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
+ err = -EINVAL;
+ else if (swap_count_continued(p, offset + i, count))
+ count = COUNT_CONTINUED;
+ else
+ err = -ENOMEM;
+ } else
+ err = -ENOENT; /* unused swap entry */

- if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
- count += usage;
- else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX)
- err = -EINVAL;
- else if (swap_count_continued(p, offset, count))
- count = COUNT_CONTINUED;
- else
- err = -ENOMEM;
- } else
- err = -ENOENT; /* unused swap entry */
+ if (err)
+ break;

- if (!err)
- WRITE_ONCE(p->swap_map[offset], count | has_cache);
+ WRITE_ONCE(p->swap_map[offset + i], count | has_cache);
+ }

-unlock_out:
unlock_cluster_or_swap_info(p, ci);
return err;
}
@@ -3451,9 +3447,12 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
* Help swapoff by noting that swap entry belongs to shmem/tmpfs
* (in which case its reference count is never incremented).
*/
-void swap_shmem_alloc(swp_entry_t entry)
+void swap_shmem_alloc(swp_entry_t entry, int nr)
{
- __swap_duplicate(entry, SWAP_MAP_SHMEM);
+ struct swap_info_struct *p = swp_swap_info(entry);
+ unsigned long offset = swp_offset(entry);
+
+ __swap_duplicate(p, offset, nr, SWAP_MAP_SHMEM);
}

/*
@@ -3465,9 +3464,11 @@ void swap_shmem_alloc(swp_entry_t entry)
*/
int swap_duplicate(swp_entry_t entry)
{
+ struct swap_info_struct *p = swp_swap_info(entry);
+ unsigned long offset = swp_offset(entry);
int err = 0;

- while (!err && __swap_duplicate(entry, 1) == -ENOMEM)
+ while (!err && __swap_duplicate(p, offset, 1, 1) == -ENOMEM)
err = add_swap_count_continuation(entry, GFP_ATOMIC);
return err;
}
@@ -3482,7 +3483,10 @@ int swap_duplicate(swp_entry_t entry)
*/
int swapcache_prepare(swp_entry_t entry)
{
- return __swap_duplicate(entry, SWAP_HAS_CACHE);
+ struct swap_info_struct *p = swp_swap_info(entry);
+ unsigned long offset = swp_offset(entry);
+
+ return __swap_duplicate(p, offset, 1, SWAP_HAS_CACHE);
}

void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
--
2.39.3