And here is a series of patches based on v6.6-rc3: mostly just cosmetic
mods in mm/shmem.c, but the last two enforcing the "size=" limit better.
8/8 goes into percpu counter territory, and could stand alone: I'll add
some more Cc's on that one.
Applies to any v6.6-rc so far, and to next-20230929 and to
mm-everything-2023-09-29-23-51: hah, there's now an 09-30-01-16,
I haven't tried it yet, but this should be good on that too.
1/8 shmem: shrink shmem_inode_info: dir_offsets in a union
2/8 shmem: remove vma arg from shmem_get_folio_gfp()
3/8 shmem: factor shmem_falloc_wait() out of shmem_fault()
4/8 shmem: trivial tidyups, removing extra blank lines, etc
5/8 shmem: shmem_acct_blocks() and shmem_inode_acct_blocks()
6/8 shmem: move memcg charge out of shmem_add_to_page_cache()
7/8 shmem: _add_to_page_cache() before shmem_inode_acct_blocks()
8/8 shmem,percpu_counter: add _limited_add(fbc, limit, amount)
include/linux/percpu_counter.h | 23 ++
include/linux/shmem_fs.h | 16 +-
lib/percpu_counter.c | 53 ++++
mm/shmem.c | 500 +++++++++++++++++------------------
4 files changed, 333 insertions(+), 259 deletions(-)
Hugh
Extract shmem's memcg charging out of shmem_add_to_page_cache(): it's
misleadingly done there, because many calls are dealing with a swapcache
page, whose memcg is nowadays always remembered while swapped out, then
the charge re-levied when it's brought back into swapcache.
Temporarily move it back up to the shmem_get_folio_gfp() level, where
the memcg was charged before v5.8; but the next commit goes on to move
it back down to a new home.
In making this change, it becomes clear that shmem_swapin_folio() does
not need to know the vma, just the fault mm (if any): call it fault_mm
rather than charge_mm - let mem_cgroup_charge() decide whom to charge.
Signed-off-by: Hugh Dickins <[email protected]>
---
mm/shmem.c | 68 +++++++++++++++++++++++-------------------------------
1 file changed, 29 insertions(+), 39 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 63ba6037b23a..0a7f7b567b80 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -146,9 +146,8 @@ static unsigned long shmem_default_max_inodes(void)
#endif
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
- struct folio **foliop, enum sgp_type sgp,
- gfp_t gfp, struct vm_area_struct *vma,
- vm_fault_t *fault_type);
+ struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
+ struct mm_struct *fault_mm, vm_fault_t *fault_type);
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
@@ -760,12 +759,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
*/
static int shmem_add_to_page_cache(struct folio *folio,
struct address_space *mapping,
- pgoff_t index, void *expected, gfp_t gfp,
- struct mm_struct *charge_mm)
+ pgoff_t index, void *expected, gfp_t gfp)
{
XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
long nr = folio_nr_pages(folio);
- int error;
VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
@@ -776,16 +773,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
folio->mapping = mapping;
folio->index = index;
- if (!folio_test_swapcache(folio)) {
- error = mem_cgroup_charge(folio, charge_mm, gfp);
- if (error) {
- if (folio_test_pmd_mappable(folio)) {
- count_vm_event(THP_FILE_FALLBACK);
- count_vm_event(THP_FILE_FALLBACK_CHARGE);
- }
- goto error;
- }
- }
+ gfp &= GFP_RECLAIM_MASK;
folio_throttle_swaprate(folio, gfp);
do {
@@ -813,15 +801,12 @@ static int shmem_add_to_page_cache(struct folio *folio,
} while (xas_nomem(&xas, gfp));
if (xas_error(&xas)) {
- error = xas_error(&xas);
- goto error;
+ folio->mapping = NULL;
+ folio_ref_sub(folio, nr);
+ return xas_error(&xas);
}
return 0;
-error:
- folio->mapping = NULL;
- folio_ref_sub(folio, nr);
- return error;
}
/*
@@ -1324,10 +1309,8 @@ static int shmem_unuse_swap_entries(struct inode *inode,
if (!xa_is_value(folio))
continue;
- error = shmem_swapin_folio(inode, indices[i],
- &folio, SGP_CACHE,
- mapping_gfp_mask(mapping),
- NULL, NULL);
+ error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
+ mapping_gfp_mask(mapping), NULL, NULL);
if (error == 0) {
folio_unlock(folio);
folio_put(folio);
@@ -1810,12 +1793,11 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
*/
static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp,
- gfp_t gfp, struct vm_area_struct *vma,
+ gfp_t gfp, struct mm_struct *fault_mm,
vm_fault_t *fault_type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
- struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
struct swap_info_struct *si;
struct folio *folio = NULL;
swp_entry_t swap;
@@ -1843,7 +1825,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
count_vm_event(PGMAJFAULT);
- count_memcg_event_mm(charge_mm, PGMAJFAULT);
+ count_memcg_event_mm(fault_mm, PGMAJFAULT);
}
/* Here we actually start the io */
folio = shmem_swapin(swap, gfp, info, index);
@@ -1880,8 +1862,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
error = shmem_add_to_page_cache(folio, mapping, index,
- swp_to_radix_entry(swap), gfp,
- charge_mm);
+ swp_to_radix_entry(swap), gfp);
if (error)
goto failed;
@@ -1929,7 +1910,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo;
- struct mm_struct *charge_mm;
+ struct mm_struct *fault_mm;
struct folio *folio;
pgoff_t hindex;
gfp_t huge_gfp;
@@ -1946,7 +1927,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
}
sbinfo = SHMEM_SB(inode->i_sb);
- charge_mm = vma ? vma->vm_mm : NULL;
+ fault_mm = vma ? vma->vm_mm : NULL;
folio = filemap_get_entry(mapping, index);
if (folio && vma && userfaultfd_minor(vma)) {
@@ -1958,7 +1939,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
if (xa_is_value(folio)) {
error = shmem_swapin_folio(inode, index, &folio,
- sgp, gfp, vma, fault_type);
+ sgp, gfp, fault_mm, fault_type);
if (error == -EEXIST)
goto repeat;
@@ -2044,9 +2025,16 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
if (sgp == SGP_WRITE)
__folio_set_referenced(folio);
- error = shmem_add_to_page_cache(folio, mapping, hindex,
- NULL, gfp & GFP_RECLAIM_MASK,
- charge_mm);
+ error = mem_cgroup_charge(folio, fault_mm, gfp);
+ if (error) {
+ if (folio_test_pmd_mappable(folio)) {
+ count_vm_event(THP_FILE_FALLBACK);
+ count_vm_event(THP_FILE_FALLBACK_CHARGE);
+ }
+ goto unacct;
+ }
+
+ error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp);
if (error)
goto unacct;
@@ -2644,8 +2632,10 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
if (unlikely(pgoff >= max_off))
goto out_release;
- ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
- gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
+ ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
+ if (ret)
+ goto out_release;
+ ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
if (ret)
goto out_release;
--
2.35.3
The vma is already there in vmf->vma, so no need for a separate arg.
Signed-off-by: Hugh Dickins <[email protected]>
---
mm/shmem.c | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 69595d341882..824eb55671d2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1921,14 +1921,13 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
* vm. If we swap it in we mark it dirty since we also free the swap
* entry since a page cannot live in both the swap and page cache.
*
- * vma, vmf, and fault_type are only supplied by shmem_fault:
- * otherwise they are NULL.
+ * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
*/
static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
- struct vm_area_struct *vma, struct vm_fault *vmf,
- vm_fault_t *fault_type)
+ struct vm_fault *vmf, vm_fault_t *fault_type)
{
+ struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo;
@@ -2141,7 +2140,7 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
enum sgp_type sgp)
{
return shmem_get_folio_gfp(inode, index, foliop, sgp,
- mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
+ mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}
/*
@@ -2225,7 +2224,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
}
err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
- gfp, vma, vmf, &ret);
+ gfp, vmf, &ret);
if (err)
return vmf_error(err);
if (folio)
@@ -4897,7 +4896,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping,
BUG_ON(!shmem_mapping(mapping));
error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
- gfp, NULL, NULL, NULL);
+ gfp, NULL, NULL);
if (error)
return ERR_PTR(error);
--
2.35.3
Shave 32 bytes off (the 64-bit) shmem_inode_info. There was a 4-byte
pahole after stop_eviction, better filled by fsflags. And the 24-byte
dir_offsets can only be used by directories, whereas shrinklist and
swaplist only by shmem_mapping() inodes (regular files or long symlinks):
so put those into a union. No change in mm/shmem.c is required for this.
Signed-off-by: Hugh Dickins <[email protected]>
---
include/linux/shmem_fs.h | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6b0c626620f5..2caa6b86106a 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -23,18 +23,22 @@ struct shmem_inode_info {
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- pgoff_t fallocend; /* highest fallocate endindex */
- struct list_head shrinklist; /* shrinkable hpage inodes */
- struct list_head swaplist; /* chain of maybes on swap */
+ union {
+ struct offset_ctx dir_offsets; /* stable directory offsets */
+ struct {
+ struct list_head shrinklist; /* shrinkable hpage inodes */
+ struct list_head swaplist; /* chain of maybes on swap */
+ };
+ };
+ struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ pgoff_t fallocend; /* highest fallocate endindex */
+ unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
- struct timespec64 i_crtime; /* file creation time */
- unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
#ifdef CONFIG_TMPFS_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
#endif
- struct offset_ctx dir_offsets; /* stable entry offsets */
struct inode vfs_inode;
};
--
2.35.3
Mostly removing a few superfluous blank lines, joining short arglines,
imposing some 80-column observance, correcting a couple of comments.
None of it more interesting than deleting a repeated INIT_LIST_HEAD().
Signed-off-by: Hugh Dickins <[email protected]>
---
mm/shmem.c | 56 ++++++++++++++++++++----------------------------------
1 file changed, 21 insertions(+), 35 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 5501a5bc8d8c..caee8ba841f7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -756,7 +756,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Like filemap_add_folio, but error if expected item has gone.
+ * Somewhat like filemap_add_folio, but error if expected item has gone.
*/
static int shmem_add_to_page_cache(struct folio *folio,
struct address_space *mapping,
@@ -825,7 +825,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
}
/*
- * Like delete_from_page_cache, but substitutes swap for @folio.
+ * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
*/
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
@@ -887,7 +887,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
cond_resched_rcu();
}
}
-
rcu_read_unlock();
return swapped << PAGE_SHIFT;
@@ -1213,7 +1212,6 @@ static int shmem_setattr(struct mnt_idmap *idmap,
if (i_uid_needs_update(idmap, attr, inode) ||
i_gid_needs_update(idmap, attr, inode)) {
error = dquot_transfer(idmap, inode, attr);
-
if (error)
return error;
}
@@ -2456,7 +2454,6 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
if (err)
return ERR_PTR(err);
-
inode = new_inode(sb);
if (!inode) {
shmem_free_inode(sb, 0);
@@ -2481,11 +2478,10 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
shmem_set_inode_flags(inode, info->fsflags);
INIT_LIST_HEAD(&info->shrinklist);
INIT_LIST_HEAD(&info->swaplist);
- INIT_LIST_HEAD(&info->swaplist);
- if (sbinfo->noswap)
- mapping_set_unevictable(inode->i_mapping);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
+ if (sbinfo->noswap)
+ mapping_set_unevictable(inode->i_mapping);
mapping_set_large_folios(inode->i_mapping);
switch (mode & S_IFMT) {
@@ -2697,7 +2693,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
}
ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
-
if (ret)
return ret;
@@ -3229,8 +3224,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
error = simple_acl_create(dir, inode);
if (error)
goto out_iput;
- error = security_inode_init_security(inode, dir,
- &dentry->d_name,
+ error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
if (error && error != -EOPNOTSUPP)
goto out_iput;
@@ -3259,14 +3253,11 @@ shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
int error;
inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
-
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
goto err_out;
}
-
- error = security_inode_init_security(inode, dir,
- NULL,
+ error = security_inode_init_security(inode, dir, NULL,
shmem_initxattrs, NULL);
if (error && error != -EOPNOTSUPP)
goto out_iput;
@@ -3303,7 +3294,8 @@ static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
/*
* Link a file..
*/
-static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+static int shmem_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
int ret = 0;
@@ -3334,7 +3326,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
inode_inc_iversion(dir);
inc_nlink(inode);
ihold(inode); /* New dentry reference */
- dget(dentry); /* Extra pinning count for the created dentry */
+ dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
out:
return ret;
@@ -3354,7 +3346,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
inode_set_ctime_current(inode));
inode_inc_iversion(dir);
drop_nlink(inode);
- dput(dentry); /* Undo the count from "create" - this does all the work */
+ dput(dentry); /* Undo the count from "create" - does all the work */
return 0;
}
@@ -3464,7 +3456,6 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
VM_NORESERVE);
-
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -3518,8 +3509,7 @@ static void shmem_put_link(void *arg)
folio_put(arg);
}
-static const char *shmem_get_link(struct dentry *dentry,
- struct inode *inode,
+static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
struct delayed_call *done)
{
struct folio *folio = NULL;
@@ -3593,8 +3583,7 @@ static int shmem_fileattr_set(struct mnt_idmap *idmap,
* Callback for security_inode_init_security() for acquiring xattrs.
*/
static int shmem_initxattrs(struct inode *inode,
- const struct xattr *xattr_array,
- void *fs_info)
+ const struct xattr *xattr_array, void *fs_info)
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
@@ -3778,7 +3767,6 @@ static struct dentry *shmem_find_alias(struct inode *inode)
return alias ?: d_find_any_alias(inode);
}
-
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
@@ -4362,8 +4350,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
}
#endif /* CONFIG_TMPFS_QUOTA */
- inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
- VM_NORESERVE);
+ inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
+ S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (IS_ERR(inode)) {
error = PTR_ERR(inode);
goto failed;
@@ -4666,11 +4654,9 @@ static ssize_t shmem_enabled_show(struct kobject *kobj,
for (i = 0; i < ARRAY_SIZE(values); i++) {
len += sysfs_emit_at(buf, len,
- shmem_huge == values[i] ? "%s[%s]" : "%s%s",
- i ? " " : "",
- shmem_format_huge(values[i]));
+ shmem_huge == values[i] ? "%s[%s]" : "%s%s",
+ i ? " " : "", shmem_format_huge(values[i]));
}
-
len += sysfs_emit_at(buf, len, "\n");
return len;
@@ -4767,8 +4753,9 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
-static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
- umode_t mode, dev_t dev, unsigned long flags)
+static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
+ struct super_block *sb, struct inode *dir,
+ umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
return inode ? inode : ERR_PTR(-ENOSPC);
@@ -4778,8 +4765,8 @@ static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct supe
/* common code */
-static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
- unsigned long flags, unsigned int i_flags)
+static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
+ loff_t size, unsigned long flags, unsigned int i_flags)
{
struct inode *inode;
struct file *res;
@@ -4798,7 +4785,6 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
S_IFREG | S_IRWXUGO, 0, flags);
-
if (IS_ERR(inode)) {
shmem_unacct_size(flags, size);
return ERR_CAST(inode);
--
2.35.3
On Fri, Sep 29, 2023 at 08:25:38PM -0700, Hugh Dickins wrote:
> Shave 32 bytes off (the 64-bit) shmem_inode_info. There was a 4-byte
> pahole after stop_eviction, better filled by fsflags. And the 24-byte
> dir_offsets can only be used by directories, whereas shrinklist and
> swaplist only by shmem_mapping() inodes (regular files or long symlinks):
> so put those into a union. No change in mm/shmem.c is required for this.
>
> Signed-off-by: Hugh Dickins <[email protected]>
Reviewed-by: Chuck Lever <[email protected]>
> ---
> include/linux/shmem_fs.h | 16 ++++++++++------
> 1 file changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 6b0c626620f5..2caa6b86106a 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -23,18 +23,22 @@ struct shmem_inode_info {
> unsigned long flags;
> unsigned long alloced; /* data pages alloced to file */
> unsigned long swapped; /* subtotal assigned to swap */
> - pgoff_t fallocend; /* highest fallocate endindex */
> - struct list_head shrinklist; /* shrinkable hpage inodes */
> - struct list_head swaplist; /* chain of maybes on swap */
> + union {
> + struct offset_ctx dir_offsets; /* stable directory offsets */
> + struct {
> + struct list_head shrinklist; /* shrinkable hpage inodes */
> + struct list_head swaplist; /* chain of maybes on swap */
> + };
> + };
> + struct timespec64 i_crtime; /* file creation time */
> struct shared_policy policy; /* NUMA memory alloc policy */
> struct simple_xattrs xattrs; /* list of xattrs */
> + pgoff_t fallocend; /* highest fallocate endindex */
> + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
> atomic_t stop_eviction; /* hold when working on inode */
> - struct timespec64 i_crtime; /* file creation time */
> - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
> #ifdef CONFIG_TMPFS_QUOTA
> struct dquot *i_dquot[MAXQUOTAS];
> #endif
> - struct offset_ctx dir_offsets; /* stable entry offsets */
> struct inode vfs_inode;
> };
>
> --
> 2.35.3
>
--
Chuck Lever
On Fri 29-09-23 20:25:38, Hugh Dickins wrote:
> Shave 32 bytes off (the 64-bit) shmem_inode_info. There was a 4-byte
> pahole after stop_eviction, better filled by fsflags. And the 24-byte
> dir_offsets can only be used by directories, whereas shrinklist and
> swaplist only by shmem_mapping() inodes (regular files or long symlinks):
> so put those into a union. No change in mm/shmem.c is required for this.
>
> Signed-off-by: Hugh Dickins <[email protected]>
Looks good to me. Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> include/linux/shmem_fs.h | 16 ++++++++++------
> 1 file changed, 10 insertions(+), 6 deletions(-)
>
> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
> index 6b0c626620f5..2caa6b86106a 100644
> --- a/include/linux/shmem_fs.h
> +++ b/include/linux/shmem_fs.h
> @@ -23,18 +23,22 @@ struct shmem_inode_info {
> unsigned long flags;
> unsigned long alloced; /* data pages alloced to file */
> unsigned long swapped; /* subtotal assigned to swap */
> - pgoff_t fallocend; /* highest fallocate endindex */
> - struct list_head shrinklist; /* shrinkable hpage inodes */
> - struct list_head swaplist; /* chain of maybes on swap */
> + union {
> + struct offset_ctx dir_offsets; /* stable directory offsets */
> + struct {
> + struct list_head shrinklist; /* shrinkable hpage inodes */
> + struct list_head swaplist; /* chain of maybes on swap */
> + };
> + };
> + struct timespec64 i_crtime; /* file creation time */
> struct shared_policy policy; /* NUMA memory alloc policy */
> struct simple_xattrs xattrs; /* list of xattrs */
> + pgoff_t fallocend; /* highest fallocate endindex */
> + unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
> atomic_t stop_eviction; /* hold when working on inode */
> - struct timespec64 i_crtime; /* file creation time */
> - unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
> #ifdef CONFIG_TMPFS_QUOTA
> struct dquot *i_dquot[MAXQUOTAS];
> #endif
> - struct offset_ctx dir_offsets; /* stable entry offsets */
> struct inode vfs_inode;
> };
>
> --
> 2.35.3
>
--
Jan Kara <[email protected]>
SUSE Labs, CR
On Fri 29-09-23 20:26:53, Hugh Dickins wrote:
> The vma is already there in vmf->vma, so no need for a separate arg.
>
> Signed-off-by: Hugh Dickins <[email protected]>
Sure. Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> mm/shmem.c | 13 ++++++-------
> 1 file changed, 6 insertions(+), 7 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 69595d341882..824eb55671d2 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1921,14 +1921,13 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> * vm. If we swap it in we mark it dirty since we also free the swap
> * entry since a page cannot live in both the swap and page cache.
> *
> - * vma, vmf, and fault_type are only supplied by shmem_fault:
> - * otherwise they are NULL.
> + * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL.
> */
> static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
> - struct vm_area_struct *vma, struct vm_fault *vmf,
> - vm_fault_t *fault_type)
> + struct vm_fault *vmf, vm_fault_t *fault_type)
> {
> + struct vm_area_struct *vma = vmf ? vmf->vma : NULL;
> struct address_space *mapping = inode->i_mapping;
> struct shmem_inode_info *info = SHMEM_I(inode);
> struct shmem_sb_info *sbinfo;
> @@ -2141,7 +2140,7 @@ int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
> enum sgp_type sgp)
> {
> return shmem_get_folio_gfp(inode, index, foliop, sgp,
> - mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
> + mapping_gfp_mask(inode->i_mapping), NULL, NULL);
> }
>
> /*
> @@ -2225,7 +2224,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
> }
>
> err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
> - gfp, vma, vmf, &ret);
> + gfp, vmf, &ret);
> if (err)
> return vmf_error(err);
> if (folio)
> @@ -4897,7 +4896,7 @@ struct folio *shmem_read_folio_gfp(struct address_space *mapping,
>
> BUG_ON(!shmem_mapping(mapping));
> error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
> - gfp, NULL, NULL, NULL);
> + gfp, NULL, NULL);
> if (error)
> return ERR_PTR(error);
>
> --
> 2.35.3
>
--
Jan Kara <[email protected]>
SUSE Labs, CR
On Fri 29-09-23 20:28:50, Hugh Dickins wrote:
> Mostly removing a few superfluous blank lines, joining short arglines,
> imposing some 80-column observance, correcting a couple of comments.
> None of it more interesting than deleting a repeated INIT_LIST_HEAD().
>
> Signed-off-by: Hugh Dickins <[email protected]>
Autumn cleaning ;). Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> mm/shmem.c | 56 ++++++++++++++++++++----------------------------------
> 1 file changed, 21 insertions(+), 35 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 5501a5bc8d8c..caee8ba841f7 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -756,7 +756,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
>
> /*
> - * Like filemap_add_folio, but error if expected item has gone.
> + * Somewhat like filemap_add_folio, but error if expected item has gone.
> */
> static int shmem_add_to_page_cache(struct folio *folio,
> struct address_space *mapping,
> @@ -825,7 +825,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
> }
>
> /*
> - * Like delete_from_page_cache, but substitutes swap for @folio.
> + * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
> */
> static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
> {
> @@ -887,7 +887,6 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping,
> cond_resched_rcu();
> }
> }
> -
> rcu_read_unlock();
>
> return swapped << PAGE_SHIFT;
> @@ -1213,7 +1212,6 @@ static int shmem_setattr(struct mnt_idmap *idmap,
> if (i_uid_needs_update(idmap, attr, inode) ||
> i_gid_needs_update(idmap, attr, inode)) {
> error = dquot_transfer(idmap, inode, attr);
> -
> if (error)
> return error;
> }
> @@ -2456,7 +2454,6 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
> if (err)
> return ERR_PTR(err);
>
> -
> inode = new_inode(sb);
> if (!inode) {
> shmem_free_inode(sb, 0);
> @@ -2481,11 +2478,10 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
> shmem_set_inode_flags(inode, info->fsflags);
> INIT_LIST_HEAD(&info->shrinklist);
> INIT_LIST_HEAD(&info->swaplist);
> - INIT_LIST_HEAD(&info->swaplist);
> - if (sbinfo->noswap)
> - mapping_set_unevictable(inode->i_mapping);
> simple_xattrs_init(&info->xattrs);
> cache_no_acl(inode);
> + if (sbinfo->noswap)
> + mapping_set_unevictable(inode->i_mapping);
> mapping_set_large_folios(inode->i_mapping);
>
> switch (mode & S_IFMT) {
> @@ -2697,7 +2693,6 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
> }
>
> ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
> -
> if (ret)
> return ret;
>
> @@ -3229,8 +3224,7 @@ shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
> error = simple_acl_create(dir, inode);
> if (error)
> goto out_iput;
> - error = security_inode_init_security(inode, dir,
> - &dentry->d_name,
> + error = security_inode_init_security(inode, dir, &dentry->d_name,
> shmem_initxattrs, NULL);
> if (error && error != -EOPNOTSUPP)
> goto out_iput;
> @@ -3259,14 +3253,11 @@ shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
> int error;
>
> inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
> -
> if (IS_ERR(inode)) {
> error = PTR_ERR(inode);
> goto err_out;
> }
> -
> - error = security_inode_init_security(inode, dir,
> - NULL,
> + error = security_inode_init_security(inode, dir, NULL,
> shmem_initxattrs, NULL);
> if (error && error != -EOPNOTSUPP)
> goto out_iput;
> @@ -3303,7 +3294,8 @@ static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
> /*
> * Link a file..
> */
> -static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
> +static int shmem_link(struct dentry *old_dentry, struct inode *dir,
> + struct dentry *dentry)
> {
> struct inode *inode = d_inode(old_dentry);
> int ret = 0;
> @@ -3334,7 +3326,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
> inode_inc_iversion(dir);
> inc_nlink(inode);
> ihold(inode); /* New dentry reference */
> - dget(dentry); /* Extra pinning count for the created dentry */
> + dget(dentry); /* Extra pinning count for the created dentry */
> d_instantiate(dentry, inode);
> out:
> return ret;
> @@ -3354,7 +3346,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
> inode_set_ctime_current(inode));
> inode_inc_iversion(dir);
> drop_nlink(inode);
> - dput(dentry); /* Undo the count from "create" - this does all the work */
> + dput(dentry); /* Undo the count from "create" - does all the work */
> return 0;
> }
>
> @@ -3464,7 +3456,6 @@ static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
>
> inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
> VM_NORESERVE);
> -
> if (IS_ERR(inode))
> return PTR_ERR(inode);
>
> @@ -3518,8 +3509,7 @@ static void shmem_put_link(void *arg)
> folio_put(arg);
> }
>
> -static const char *shmem_get_link(struct dentry *dentry,
> - struct inode *inode,
> +static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
> struct delayed_call *done)
> {
> struct folio *folio = NULL;
> @@ -3593,8 +3583,7 @@ static int shmem_fileattr_set(struct mnt_idmap *idmap,
> * Callback for security_inode_init_security() for acquiring xattrs.
> */
> static int shmem_initxattrs(struct inode *inode,
> - const struct xattr *xattr_array,
> - void *fs_info)
> + const struct xattr *xattr_array, void *fs_info)
> {
> struct shmem_inode_info *info = SHMEM_I(inode);
> struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
> @@ -3778,7 +3767,6 @@ static struct dentry *shmem_find_alias(struct inode *inode)
> return alias ?: d_find_any_alias(inode);
> }
>
> -
> static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
> struct fid *fid, int fh_len, int fh_type)
> {
> @@ -4362,8 +4350,8 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
> }
> #endif /* CONFIG_TMPFS_QUOTA */
>
> - inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL, S_IFDIR | sbinfo->mode, 0,
> - VM_NORESERVE);
> + inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
> + S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
> if (IS_ERR(inode)) {
> error = PTR_ERR(inode);
> goto failed;
> @@ -4666,11 +4654,9 @@ static ssize_t shmem_enabled_show(struct kobject *kobj,
>
> for (i = 0; i < ARRAY_SIZE(values); i++) {
> len += sysfs_emit_at(buf, len,
> - shmem_huge == values[i] ? "%s[%s]" : "%s%s",
> - i ? " " : "",
> - shmem_format_huge(values[i]));
> + shmem_huge == values[i] ? "%s[%s]" : "%s%s",
> + i ? " " : "", shmem_format_huge(values[i]));
> }
> -
> len += sysfs_emit_at(buf, len, "\n");
>
> return len;
> @@ -4767,8 +4753,9 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
> #define shmem_acct_size(flags, size) 0
> #define shmem_unacct_size(flags, size) do {} while (0)
>
> -static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct super_block *sb, struct inode *dir,
> - umode_t mode, dev_t dev, unsigned long flags)
> +static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
> + struct super_block *sb, struct inode *dir,
> + umode_t mode, dev_t dev, unsigned long flags)
> {
> struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
> return inode ? inode : ERR_PTR(-ENOSPC);
> @@ -4778,8 +4765,8 @@ static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, struct supe
>
> /* common code */
>
> -static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, loff_t size,
> - unsigned long flags, unsigned int i_flags)
> +static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
> + loff_t size, unsigned long flags, unsigned int i_flags)
> {
> struct inode *inode;
> struct file *res;
> @@ -4798,7 +4785,6 @@ static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name, l
>
> inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
> S_IFREG | S_IRWXUGO, 0, flags);
> -
> if (IS_ERR(inode)) {
> shmem_unacct_size(flags, size);
> return ERR_CAST(inode);
> --
> 2.35.3
>
--
Jan Kara <[email protected]>
SUSE Labs, CR
On Fri 29-09-23 20:31:27, Hugh Dickins wrote:
> Extract shmem's memcg charging out of shmem_add_to_page_cache(): it's
> misleadingly done there, because many calls are dealing with a swapcache
> page, whose memcg is nowadays always remembered while swapped out, then
> the charge re-levied when it's brought back into swapcache.
>
> Temporarily move it back up to the shmem_get_folio_gfp() level, where
> the memcg was charged before v5.8; but the next commit goes on to move
> it back down to a new home.
>
> In making this change, it becomes clear that shmem_swapin_folio() does
> not need to know the vma, just the fault mm (if any): call it fault_mm
> rather than charge_mm - let mem_cgroup_charge() decide whom to charge.
>
> Signed-off-by: Hugh Dickins <[email protected]>
Looks good. Feel free to add:
Reviewed-by: Jan Kara <[email protected]>
Honza
> ---
> mm/shmem.c | 68 +++++++++++++++++++++++-------------------------------
> 1 file changed, 29 insertions(+), 39 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 63ba6037b23a..0a7f7b567b80 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -146,9 +146,8 @@ static unsigned long shmem_default_max_inodes(void)
> #endif
>
> static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> - struct folio **foliop, enum sgp_type sgp,
> - gfp_t gfp, struct vm_area_struct *vma,
> - vm_fault_t *fault_type);
> + struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
> + struct mm_struct *fault_mm, vm_fault_t *fault_type);
>
> static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
> {
> @@ -760,12 +759,10 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> */
> static int shmem_add_to_page_cache(struct folio *folio,
> struct address_space *mapping,
> - pgoff_t index, void *expected, gfp_t gfp,
> - struct mm_struct *charge_mm)
> + pgoff_t index, void *expected, gfp_t gfp)
> {
> XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));
> long nr = folio_nr_pages(folio);
> - int error;
>
> VM_BUG_ON_FOLIO(index != round_down(index, nr), folio);
> VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
> @@ -776,16 +773,7 @@ static int shmem_add_to_page_cache(struct folio *folio,
> folio->mapping = mapping;
> folio->index = index;
>
> - if (!folio_test_swapcache(folio)) {
> - error = mem_cgroup_charge(folio, charge_mm, gfp);
> - if (error) {
> - if (folio_test_pmd_mappable(folio)) {
> - count_vm_event(THP_FILE_FALLBACK);
> - count_vm_event(THP_FILE_FALLBACK_CHARGE);
> - }
> - goto error;
> - }
> - }
> + gfp &= GFP_RECLAIM_MASK;
> folio_throttle_swaprate(folio, gfp);
>
> do {
> @@ -813,15 +801,12 @@ static int shmem_add_to_page_cache(struct folio *folio,
> } while (xas_nomem(&xas, gfp));
>
> if (xas_error(&xas)) {
> - error = xas_error(&xas);
> - goto error;
> + folio->mapping = NULL;
> + folio_ref_sub(folio, nr);
> + return xas_error(&xas);
> }
>
> return 0;
> -error:
> - folio->mapping = NULL;
> - folio_ref_sub(folio, nr);
> - return error;
> }
>
> /*
> @@ -1324,10 +1309,8 @@ static int shmem_unuse_swap_entries(struct inode *inode,
>
> if (!xa_is_value(folio))
> continue;
> - error = shmem_swapin_folio(inode, indices[i],
> - &folio, SGP_CACHE,
> - mapping_gfp_mask(mapping),
> - NULL, NULL);
> + error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE,
> + mapping_gfp_mask(mapping), NULL, NULL);
> if (error == 0) {
> folio_unlock(folio);
> folio_put(folio);
> @@ -1810,12 +1793,11 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
> */
> static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> struct folio **foliop, enum sgp_type sgp,
> - gfp_t gfp, struct vm_area_struct *vma,
> + gfp_t gfp, struct mm_struct *fault_mm,
> vm_fault_t *fault_type)
> {
> struct address_space *mapping = inode->i_mapping;
> struct shmem_inode_info *info = SHMEM_I(inode);
> - struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
> struct swap_info_struct *si;
> struct folio *folio = NULL;
> swp_entry_t swap;
> @@ -1843,7 +1825,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> if (fault_type) {
> *fault_type |= VM_FAULT_MAJOR;
> count_vm_event(PGMAJFAULT);
> - count_memcg_event_mm(charge_mm, PGMAJFAULT);
> + count_memcg_event_mm(fault_mm, PGMAJFAULT);
> }
> /* Here we actually start the io */
> folio = shmem_swapin(swap, gfp, info, index);
> @@ -1880,8 +1862,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> }
>
> error = shmem_add_to_page_cache(folio, mapping, index,
> - swp_to_radix_entry(swap), gfp,
> - charge_mm);
> + swp_to_radix_entry(swap), gfp);
> if (error)
> goto failed;
>
> @@ -1929,7 +1910,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> struct address_space *mapping = inode->i_mapping;
> struct shmem_inode_info *info = SHMEM_I(inode);
> struct shmem_sb_info *sbinfo;
> - struct mm_struct *charge_mm;
> + struct mm_struct *fault_mm;
> struct folio *folio;
> pgoff_t hindex;
> gfp_t huge_gfp;
> @@ -1946,7 +1927,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> }
>
> sbinfo = SHMEM_SB(inode->i_sb);
> - charge_mm = vma ? vma->vm_mm : NULL;
> + fault_mm = vma ? vma->vm_mm : NULL;
>
> folio = filemap_get_entry(mapping, index);
> if (folio && vma && userfaultfd_minor(vma)) {
> @@ -1958,7 +1939,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
>
> if (xa_is_value(folio)) {
> error = shmem_swapin_folio(inode, index, &folio,
> - sgp, gfp, vma, fault_type);
> + sgp, gfp, fault_mm, fault_type);
> if (error == -EEXIST)
> goto repeat;
>
> @@ -2044,9 +2025,16 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
> if (sgp == SGP_WRITE)
> __folio_set_referenced(folio);
>
> - error = shmem_add_to_page_cache(folio, mapping, hindex,
> - NULL, gfp & GFP_RECLAIM_MASK,
> - charge_mm);
> + error = mem_cgroup_charge(folio, fault_mm, gfp);
> + if (error) {
> + if (folio_test_pmd_mappable(folio)) {
> + count_vm_event(THP_FILE_FALLBACK);
> + count_vm_event(THP_FILE_FALLBACK_CHARGE);
> + }
> + goto unacct;
> + }
> +
> + error = shmem_add_to_page_cache(folio, mapping, hindex, NULL, gfp);
> if (error)
> goto unacct;
>
> @@ -2644,8 +2632,10 @@ int shmem_mfill_atomic_pte(pmd_t *dst_pmd,
> if (unlikely(pgoff >= max_off))
> goto out_release;
>
> - ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
> - gfp & GFP_RECLAIM_MASK, dst_vma->vm_mm);
> + ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp);
> + if (ret)
> + goto out_release;
> + ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp);
> if (ret)
> goto out_release;
>
> --
> 2.35.3
>
--
Jan Kara <[email protected]>
SUSE Labs, CR