2020-09-14 09:07:13

by Chao Yu

[permalink] [raw]
Subject: [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

Add a per-sbi slab cache "f2fs_page_array_entry-%u:%u" for memory
allocation of page pointer array in compress context.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- add missing CONFIG_F2FS_FS_COMPRESSION
fs/f2fs/compress.c | 86 +++++++++++++++++++++++++++++++++-------------
fs/f2fs/f2fs.h | 9 +++++
fs/f2fs/super.c | 8 ++++-
3 files changed, 79 insertions(+), 24 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 7895186cc765..c6fcd68df71a 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -17,6 +17,32 @@
#include "node.h"
#include <trace/events/f2fs.h>

+static void *page_array_alloc(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int size = sizeof(struct page *) <<
+ F2FS_I(inode)->i_log_cluster_size;
+
+ if (likely(size == sbi->page_array_slab_size))
+ return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
+ return f2fs_kzalloc(sbi, size, GFP_NOFS);
+}
+
+static void page_array_free(struct inode *inode, void *pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned int size = sizeof(struct page *) <<
+ F2FS_I(inode)->i_log_cluster_size;
+
+ if (!pages)
+ return;
+
+ if (likely(size == sbi->page_array_slab_size))
+ kmem_cache_free(sbi->page_array_slab, pages);
+ else
+ kfree(pages);
+}
+
struct f2fs_compress_ops {
int (*init_compress_ctx)(struct compress_ctx *cc);
void (*destroy_compress_ctx)(struct compress_ctx *cc);
@@ -130,19 +156,16 @@ struct page *f2fs_compress_control_page(struct page *page)

int f2fs_init_compress_ctx(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
-
if (cc->nr_rpages)
return 0;

- cc->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ cc->rpages = page_array_alloc(cc->inode);
return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
- kfree(cc->rpages);
+ page_array_free(cc->inode, cc->rpages);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -573,7 +596,6 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)

static int f2fs_compress_pages(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
@@ -592,8 +614,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

- cc->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- cc->nr_cpages, GFP_NOFS);
+ cc->cpages = page_array_alloc(cc->inode);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -667,7 +688,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- kfree(cc->cpages);
+ page_array_free(cc->inode, cc->cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -706,8 +727,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}

- dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- dic->cluster_size, GFP_NOFS);
+ dic->tpages = page_array_alloc(dic->inode);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@@ -1046,6 +1066,7 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,

{
struct compress_ctx cc = {
+ .inode = inode,
.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
.cluster_size = F2FS_I(inode)->i_cluster_size,
.rpages = fsdata,
@@ -1179,8 +1200,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ cic->rpages = page_array_alloc(cc->inode);
if (!cic->rpages)
goto out_put_cic;

@@ -1278,7 +1298,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
return 0;

out_destroy_crypt:
- kfree(cic->rpages);
+ page_array_free(cc->inode, cic->rpages);

for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1322,7 +1342,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}

- kfree(cic->rpages);
+ page_array_free(cic->inode, cic->rpages);
kfree(cic);
}

@@ -1419,7 +1439,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,

err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
- kfree(cc->cpages);
+ page_array_free(cc->inode, cc->cpages);
cc->cpages = NULL;
if (!err)
return 0;
@@ -1446,8 +1466,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);

- dic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
- cc->log_cluster_size, GFP_NOFS);
+ dic->rpages = page_array_alloc(cc->inode);
if (!dic->rpages) {
kfree(dic);
return ERR_PTR(-ENOMEM);
@@ -1466,8 +1485,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;

- dic->cpages = f2fs_kzalloc(sbi, sizeof(struct page *) *
- dic->nr_cpages, GFP_NOFS);
+ dic->cpages = page_array_alloc(dic->inode);
if (!dic->cpages)
goto out_free;

@@ -1502,7 +1520,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- kfree(dic->tpages);
+ page_array_free(dic->inode, dic->tpages);
}

if (dic->cpages) {
@@ -1511,10 +1529,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- kfree(dic->cpages);
+ page_array_free(dic->inode, dic->cpages);
}

- kfree(dic->rpages);
+ page_array_free(dic->inode, dic->rpages);
kfree(dic);
}

@@ -1543,3 +1561,25 @@ void f2fs_decompress_end_io(struct page **rpages,
unlock_page(rpage);
}
}
+
+int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
+{
+ dev_t dev = sbi->sb->s_bdev->bd_dev;
+ char slab_name[32];
+
+ sprintf(slab_name, "f2fs_page_array_entry-%u:%u", MAJOR(dev), MINOR(dev));
+
+ sbi->page_array_slab_size = sizeof(struct page *) <<
+ F2FS_OPTION(sbi).compress_log_size;
+
+ sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
+ sbi->page_array_slab_size);
+ if (!sbi->page_array_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
+{
+ kmem_cache_destroy(sbi->page_array_slab);
+}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 8efa19baf33d..50953b442220 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1626,6 +1626,11 @@ struct f2fs_sb_info {

struct kmem_cache *inline_xattr_slab; /* inline xattr entry */
unsigned int inline_xattr_slab_size; /* default inline xattr slab size */
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct kmem_cache *page_array_slab; /* page array entry */
+ unsigned int page_array_slab_size; /* default page array slab size */
+#endif
};

struct f2fs_private_dio {
@@ -3933,6 +3938,8 @@ void f2fs_decompress_end_io(struct page **rpages,
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
+int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
+void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -3949,6 +3956,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
}
static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
+static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
+static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
#endif

static inline void set_compress_context(struct inode *inode)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 844ea837ebd7..d7336914d2b3 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1277,6 +1277,7 @@ static void f2fs_put_super(struct super_block *sb)
kfree(sbi->raw_super);

destroy_device_list(sbi);
+ f2fs_destroy_page_array_cache(sbi);
f2fs_destroy_xattr_caches(sbi);
mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
@@ -3613,13 +3614,16 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
err = f2fs_init_xattr_caches(sbi);
if (err)
goto free_io_dummy;
+ err = f2fs_init_page_array_cache(sbi);
+ if (err)
+ goto free_xattr_cache;

/* get an inode for meta space */
sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
if (IS_ERR(sbi->meta_inode)) {
f2fs_err(sbi, "Failed to read F2FS meta data inode");
err = PTR_ERR(sbi->meta_inode);
- goto free_xattr_cache;
+ goto free_page_array_cache;
}

err = f2fs_get_valid_checkpoint(sbi);
@@ -3895,6 +3899,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
make_bad_inode(sbi->meta_inode);
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
+free_page_array_cache:
+ f2fs_destroy_page_array_cache(sbi);
free_xattr_cache:
f2fs_destroy_xattr_caches(sbi);
free_io_dummy:
--
2.26.2


2020-09-14 09:09:14

by Chao Yu

[permalink] [raw]
Subject: [PATCH v2 2/2] f2fs: compress: introduce cic/dic slab cache

Add two slab caches: "f2fs_cic_entry" and "f2fs_dic_entry" for memory
allocation of compress_io_ctx and decompress_io_ctx structure.

Signed-off-by: Chao Yu <[email protected]>
---
v2:
- fix -Wreturn-type warning
fs/f2fs/compress.c | 67 +++++++++++++++++++++++++++++++++++++++++-----
fs/f2fs/f2fs.h | 4 +++
fs/f2fs/super.c | 6 +++++
3 files changed, 70 insertions(+), 7 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index c6fcd68df71a..10a9f39b9d6a 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -17,6 +17,9 @@
#include "node.h"
#include <trace/events/f2fs.h>

+static struct kmem_cache *cic_entry_slab;
+static struct kmem_cache *dic_entry_slab;
+
static void *page_array_alloc(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -1193,7 +1196,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

fio.version = ni.version;

- cic = f2fs_kzalloc(sbi, sizeof(struct compress_io_ctx), GFP_NOFS);
+ cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
if (!cic)
goto out_put_dnode;

@@ -1308,7 +1311,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
f2fs_put_page(cc->cpages[i], 1);
}
out_put_cic:
- kfree(cic);
+ kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
@@ -1343,7 +1346,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
}

page_array_free(cic->inode, cic->rpages);
- kfree(cic);
+ kmem_cache_free(cic_entry_slab, cic);
}

static int f2fs_write_raw_pages(struct compress_ctx *cc,
@@ -1457,18 +1460,17 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
struct decompress_io_ctx *dic;
pgoff_t start_idx = start_idx_of_cluster(cc);
int i;

- dic = f2fs_kzalloc(sbi, sizeof(struct decompress_io_ctx), GFP_NOFS);
+ dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
if (!dic)
return ERR_PTR(-ENOMEM);

dic->rpages = page_array_alloc(cc->inode);
if (!dic->rpages) {
- kfree(dic);
+ kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
}

@@ -1533,7 +1535,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
}

page_array_free(dic->inode, dic->rpages);
- kfree(dic);
+ kmem_cache_free(dic_entry_slab, dic);
}

void f2fs_decompress_end_io(struct page **rpages,
@@ -1583,3 +1585,54 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
kmem_cache_destroy(sbi->page_array_slab);
}
+
+static int __init f2fs_init_cic_cache(void)
+{
+ cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
+ sizeof(struct compress_io_ctx));
+ if (!cic_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+static void f2fs_destroy_cic_cache(void)
+{
+ kmem_cache_destroy(cic_entry_slab);
+}
+
+static int __init f2fs_init_dic_cache(void)
+{
+ dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
+ sizeof(struct decompress_io_ctx));
+ if (!dic_entry_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+static void f2fs_destroy_dic_cache(void)
+{
+ kmem_cache_destroy(dic_entry_slab);
+}
+
+int __init f2fs_init_compress_cache(void)
+{
+ int err;
+
+ err = f2fs_init_cic_cache();
+ if (err)
+ goto out;
+ err = f2fs_init_dic_cache();
+ if (err)
+ goto free_cic;
+ return 0;
+free_cic:
+ f2fs_destroy_cic_cache();
+out:
+ return -ENOMEM;
+}
+
+void f2fs_destroy_compress_cache(void)
+{
+ f2fs_destroy_dic_cache();
+ f2fs_destroy_cic_cache();
+}
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 50953b442220..ca3f3ce4b2e3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3940,6 +3940,8 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
+int __init f2fs_init_compress_cache(void);
+void f2fs_destroy_compress_cache(void);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -3958,6 +3960,8 @@ static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { }
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
+static inline int __init f2fs_init_compress_cache(void) { return 0; }
+static inline void f2fs_destroy_compress_cache(void) { }
#endif

static inline void set_compress_context(struct inode *inode)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index d7336914d2b3..427ce4cbd124 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -4047,7 +4047,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_compress_mempool();
if (err)
goto free_bioset;
+ err = f2fs_init_compress_cache();
+ if (err)
+ goto free_compress_mempool;
return 0;
+free_compress_mempool:
+ f2fs_destroy_compress_mempool();
free_bioset:
f2fs_destroy_bioset();
free_bio_enrty_cache:
@@ -4079,6 +4084,7 @@ static int __init init_f2fs_fs(void)

static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_compress_cache();
f2fs_destroy_compress_mempool();
f2fs_destroy_bioset();
f2fs_destroy_bio_entry_cache();
--
2.26.2

2020-09-29 08:24:28

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

I found a bug related to the number of page pointer allocation related to
nr_cpages.

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index f086ac43ca825..3a18666725fef 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -20,22 +20,20 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

-static void *page_array_alloc(struct inode *inode)
+static void *page_array_alloc(struct inode *inode, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;

if (likely(size == sbi->page_array_slab_size))
return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

-static void page_array_free(struct inode *inode, void *pages)
+static void page_array_free(struct inode *inode, void *pages, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;

if (!pages)
return;
@@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;

- cc->rpages = page_array_alloc(cc->inode);
+ cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
- page_array_free(cc->inode, cc->rpages);
+ page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
- unsigned int max_len, nr_cpages;
+ unsigned int max_len, new_nr_cpages;
+ struct page **new_cpages;
int i, ret;

trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

- cc->cpages = page_array_alloc(cc->inode);
+ cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);

- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+ new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ /* Now we're going to cut unnecessary tail pages */
+ new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
+ if (!new_cpages) {
+ ret = -ENOMEM;
+ goto out_vunmap_cbuf;
+ }

/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
- (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
+ (new_nr_cpages * PAGE_SIZE) -
+ (cc->clen + COMPRESS_HEADER_SIZE));

vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);

- for (i = nr_cpages; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (i < new_nr_cpages) {
+ new_cpages[i] = cc->cpages[i];
+ continue;
+ }
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);

- cc->nr_cpages = nr_cpages;
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = new_cpages;
+ cc->nr_cpages = new_nr_cpages;

trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}

- dic->tpages = page_array_alloc(dic->inode);
+ dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = page_array_alloc(cc->inode);
+ cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;

@@ -1301,7 +1314,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
return 0;

out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages);
+ page_array_free(cc->inode, cic->rpages, cc->cluster_size);

for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1345,7 +1358,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}

- page_array_free(cic->inode, cic->rpages);
+ page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}

@@ -1442,7 +1455,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,

err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
- page_array_free(cc->inode, cc->cpages);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
if (!err)
return 0;
@@ -1468,7 +1481,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);

- dic->rpages = page_array_alloc(cc->inode);
+ dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1487,7 +1500,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;

- dic->cpages = page_array_alloc(dic->inode);
+ dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
if (!dic->cpages)
goto out_free;

@@ -1522,7 +1535,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages);
+ page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}

if (dic->cpages) {
@@ -1531,10 +1544,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages);
+ page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}

- page_array_free(dic->inode, dic->rpages);
+ page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}

--
2.28.0.709.gb0816b6eb0-goog

2020-09-29 08:46:58

by Chao Yu

[permalink] [raw]
Subject: Re: [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 2020/9/29 16:23, Jaegeuk Kim wrote:
> I found a bug related to the number of page pointer allocation related to
> nr_cpages.

Jaegeuk,

If I didn't miss anything, you mean that nr_cpages could be larger
than nr_rpages, right? the problematic case here is lzo/lzo-rle:

cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);

As we can't limited clen as we did for lz4/zstd:

cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;

>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index f086ac43ca825..3a18666725fef 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -20,22 +20,20 @@
> static struct kmem_cache *cic_entry_slab;
> static struct kmem_cache *dic_entry_slab;
>
> -static void *page_array_alloc(struct inode *inode)
> +static void *page_array_alloc(struct inode *inode, int nr)
> {
> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> - unsigned int size = sizeof(struct page *) <<
> - F2FS_I(inode)->i_log_cluster_size;
> + unsigned int size = sizeof(struct page *) * nr;
>
> if (likely(size == sbi->page_array_slab_size))
> return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
> return f2fs_kzalloc(sbi, size, GFP_NOFS);
> }
>
> -static void page_array_free(struct inode *inode, void *pages)
> +static void page_array_free(struct inode *inode, void *pages, int nr)
> {
> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> - unsigned int size = sizeof(struct page *) <<
> - F2FS_I(inode)->i_log_cluster_size;
> + unsigned int size = sizeof(struct page *) * nr;
>
> if (!pages)
> return;
> @@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
> if (cc->rpages)
> return 0;
>
> - cc->rpages = page_array_alloc(cc->inode);
> + cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> return cc->rpages ? 0 : -ENOMEM;
> }
>
> void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
> {
> - page_array_free(cc->inode, cc->rpages);
> + page_array_free(cc->inode, cc->rpages, cc->cluster_size);
> cc->rpages = NULL;
> cc->nr_rpages = 0;
> cc->nr_cpages = 0;
> @@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> struct f2fs_inode_info *fi = F2FS_I(cc->inode);
> const struct f2fs_compress_ops *cops =
> f2fs_cops[fi->i_compress_algorithm];
> - unsigned int max_len, nr_cpages;
> + unsigned int max_len, new_nr_cpages;
> + struct page **new_cpages;
> int i, ret;
>
> trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
> @@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> max_len = COMPRESS_HEADER_SIZE + cc->clen;
> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>
> - cc->cpages = page_array_alloc(cc->inode);
> + cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
> if (!cc->cpages) {
> ret = -ENOMEM;
> goto destroy_compress_ctx;
> @@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
> cc->cbuf->reserved[i] = cpu_to_le32(0);
>
> - nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> + new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> +
> + /* Now we're going to cut unnecessary tail pages */
> + new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> + if (!new_cpages) {
> + ret = -ENOMEM;
> + goto out_vunmap_cbuf;
> + }
>
> /* zero out any unused part of the last page */
> memset(&cc->cbuf->cdata[cc->clen], 0,
> - (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
> + (new_nr_cpages * PAGE_SIZE) -
> + (cc->clen + COMPRESS_HEADER_SIZE));
>
> vm_unmap_ram(cc->cbuf, cc->nr_cpages);
> vm_unmap_ram(cc->rbuf, cc->cluster_size);
>
> - for (i = nr_cpages; i < cc->nr_cpages; i++) {
> + for (i = 0; i < cc->nr_cpages; i++) {
> + if (i < new_nr_cpages) {
> + new_cpages[i] = cc->cpages[i];
> + continue;
> + }
> f2fs_compress_free_page(cc->cpages[i]);
> cc->cpages[i] = NULL;
> }
> @@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> if (cops->destroy_compress_ctx)
> cops->destroy_compress_ctx(cc);
>
> - cc->nr_cpages = nr_cpages;
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> + cc->cpages = new_cpages;
> + cc->nr_cpages = new_nr_cpages;
>
> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> cc->clen, ret);
> @@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> if (cc->cpages[i])
> f2fs_compress_free_page(cc->cpages[i]);
> }
> - page_array_free(cc->inode, cc->cpages);
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> cc->cpages = NULL;
> destroy_compress_ctx:
> if (cops->destroy_compress_ctx)
> @@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> goto out_free_dic;
> }
>
> - dic->tpages = page_array_alloc(dic->inode);
> + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
> if (!dic->tpages) {
> ret = -ENOMEM;
> goto out_free_dic;
> @@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
> cic->inode = inode;
> atomic_set(&cic->pending_pages, cc->nr_cpages);
> - cic->rpages = page_array_alloc(cc->inode);
> + cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> if (!cic->rpages)
> goto out_put_cic;
>
> @@ -1301,7 +1314,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> return 0;
>
> out_destroy_crypt:
> - page_array_free(cc->inode, cic->rpages);
> + page_array_free(cc->inode, cic->rpages, cc->cluster_size);
>
> for (--i; i >= 0; i--)
> fscrypt_finalize_bounce_page(&cc->cpages[i]);
> @@ -1345,7 +1358,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
> end_page_writeback(cic->rpages[i]);
> }
>
> - page_array_free(cic->inode, cic->rpages);
> + page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
> kmem_cache_free(cic_entry_slab, cic);
> }
>
> @@ -1442,7 +1455,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
>
> err = f2fs_write_compressed_pages(cc, submitted,
> wbc, io_type);
> - page_array_free(cc->inode, cc->cpages);
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> cc->cpages = NULL;
> if (!err)
> return 0;
> @@ -1468,7 +1481,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> if (!dic)
> return ERR_PTR(-ENOMEM);
>
> - dic->rpages = page_array_alloc(cc->inode);
> + dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> if (!dic->rpages) {
> kmem_cache_free(dic_entry_slab, dic);
> return ERR_PTR(-ENOMEM);
> @@ -1487,7 +1500,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> dic->rpages[i] = cc->rpages[i];
> dic->nr_rpages = cc->cluster_size;
>
> - dic->cpages = page_array_alloc(dic->inode);
> + dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
> if (!dic->cpages)
> goto out_free;
>
> @@ -1522,7 +1535,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> continue;
> f2fs_compress_free_page(dic->tpages[i]);
> }
> - page_array_free(dic->inode, dic->tpages);
> + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
> }
>
> if (dic->cpages) {
> @@ -1531,10 +1544,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> continue;
> f2fs_compress_free_page(dic->cpages[i]);
> }
> - page_array_free(dic->inode, dic->cpages);
> + page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
> }
>
> - page_array_free(dic->inode, dic->rpages);
> + page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
> kmem_cache_free(dic_entry_slab, dic);
> }
>
>

2020-09-29 08:51:10

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 09/29, Chao Yu wrote:
> On 2020/9/29 16:23, Jaegeuk Kim wrote:
> > I found a bug related to the number of page pointer allocation related to
> > nr_cpages.
>
> Jaegeuk,
>
> If I didn't miss anything, you mean that nr_cpages could be larger
> than nr_rpages, right? the problematic case here is lzo/lzo-rle:
>
> cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
>
> As we can't limited clen as we did for lz4/zstd:
>
> cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;

Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
mem leak.

Signed-off-by: Jaegeuk Kim <[email protected]>
---
fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 26 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index f086ac43ca825..ba2d4897744d8 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -20,22 +20,20 @@
static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

-static void *page_array_alloc(struct inode *inode)
+static void *page_array_alloc(struct inode *inode, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;

if (likely(size == sbi->page_array_slab_size))
return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

-static void page_array_free(struct inode *inode, void *pages)
+static void page_array_free(struct inode *inode, void *pages, int nr)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- unsigned int size = sizeof(struct page *) <<
- F2FS_I(inode)->i_log_cluster_size;
+ unsigned int size = sizeof(struct page *) * nr;

if (!pages)
return;
@@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
if (cc->rpages)
return 0;

- cc->rpages = page_array_alloc(cc->inode);
+ cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
return cc->rpages ? 0 : -ENOMEM;
}

void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
{
- page_array_free(cc->inode, cc->rpages);
+ page_array_free(cc->inode, cc->rpages, cc->cluster_size);
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
@@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
struct f2fs_inode_info *fi = F2FS_I(cc->inode);
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
- unsigned int max_len, nr_cpages;
+ unsigned int max_len, new_nr_cpages;
+ struct page **new_cpages;
int i, ret;

trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
@@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

- cc->cpages = page_array_alloc(cc->inode);
+ cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
ret = -ENOMEM;
goto destroy_compress_ctx;
@@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);

- nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+ new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+
+ /* Now we're going to cut unnecessary tail pages */
+ new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
+ if (new_cpages) {
+ ret = -ENOMEM;
+ goto out_vunmap_cbuf;
+ }

/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
- (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
+ (new_nr_cpages * PAGE_SIZE) -
+ (cc->clen + COMPRESS_HEADER_SIZE));

vm_unmap_ram(cc->cbuf, cc->nr_cpages);
vm_unmap_ram(cc->rbuf, cc->cluster_size);

- for (i = nr_cpages; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++) {
+ if (i < new_nr_cpages) {
+ new_cpages[i] = cc->cpages[i];
+ continue;
+ }
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);

- cc->nr_cpages = nr_cpages;
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = new_cpages;
+ cc->nr_cpages = new_nr_cpages;

trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cc->cpages[i])
f2fs_compress_free_page(cc->cpages[i]);
}
- page_array_free(cc->inode, cc->cpages);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
cc->cpages = NULL;
destroy_compress_ctx:
if (cops->destroy_compress_ctx)
@@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}

- dic->tpages = page_array_alloc(dic->inode);
+ dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) {
ret = -ENOMEM;
goto out_free_dic;
@@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
atomic_set(&cic->pending_pages, cc->nr_cpages);
- cic->rpages = page_array_alloc(cc->inode);
+ cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;

@@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
spin_unlock(&fi->i_size_lock);

f2fs_put_rpages(cc);
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
f2fs_destroy_compress_ctx(cc);
return 0;

out_destroy_crypt:
- page_array_free(cc->inode, cic->rpages);
+ page_array_free(cc->inode, cic->rpages, cc->cluster_size);

for (--i; i >= 0; i--)
fscrypt_finalize_bounce_page(&cc->cpages[i]);
@@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
continue;
f2fs_put_page(cc->cpages[i], 1);
}
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
out_put_cic:
kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
@@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
end_page_writeback(cic->rpages[i]);
}

- page_array_free(cic->inode, cic->rpages);
+ page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
kmem_cache_free(cic_entry_slab, cic);
}

@@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,

err = f2fs_write_compressed_pages(cc, submitted,
wbc, io_type);
- page_array_free(cc->inode, cc->cpages);
- cc->cpages = NULL;
if (!err)
return 0;
f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
@@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
if (!dic)
return ERR_PTR(-ENOMEM);

- dic->rpages = page_array_alloc(cc->inode);
+ dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!dic->rpages) {
kmem_cache_free(dic_entry_slab, dic);
return ERR_PTR(-ENOMEM);
@@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->rpages[i] = cc->rpages[i];
dic->nr_rpages = cc->cluster_size;

- dic->cpages = page_array_alloc(dic->inode);
+ dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
if (!dic->cpages)
goto out_free;

@@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->tpages[i]);
}
- page_array_free(dic->inode, dic->tpages);
+ page_array_free(dic->inode, dic->tpages, dic->cluster_size);
}

if (dic->cpages) {
@@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
f2fs_compress_free_page(dic->cpages[i]);
}
- page_array_free(dic->inode, dic->cpages);
+ page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
}

- page_array_free(dic->inode, dic->rpages);
+ page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
kmem_cache_free(dic_entry_slab, dic);
}

--
2.28.0.709.gb0816b6eb0-goog

2020-09-29 09:15:46

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 09/29, Jaegeuk Kim wrote:
> On 09/29, Chao Yu wrote:
> > On 2020/9/29 16:23, Jaegeuk Kim wrote:
> > > I found a bug related to the number of page pointer allocation related to
> > > nr_cpages.
> >
> > Jaegeuk,
> >
> > If I didn't miss anything, you mean that nr_cpages could be larger
> > than nr_rpages, right? the problematic case here is lzo/lzo-rle:
> >
> > cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
> >
> > As we can't limit clen as we did for lz4/zstd:
> >
> > cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
>
> Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
> mem leak.
>
One more fix:

---
fs/f2fs/compress.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index ba2d4897744d8..b9557865d627b 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -662,7 +662,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)

/* Now we're going to cut unnecessary tail pages */
new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
- if (new_cpages) {
+ if (!new_cpages) {
ret = -ENOMEM;
goto out_vunmap_cbuf;
}
@@ -1186,7 +1186,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
*/
down_read(&sbi->node_write);
} else if (!f2fs_trylock_op(sbi)) {
- return -EAGAIN;
+ goto out_free;
}

set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1325,8 +1325,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
continue;
f2fs_put_page(cc->cpages[i], 1);
}
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
- cc->cpages = NULL;
out_put_cic:
kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
@@ -1336,6 +1334,9 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
up_read(&sbi->node_write);
else
f2fs_unlock_op(sbi);
+out_free:
+ page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
+ cc->cpages = NULL;
return -EAGAIN;
}

--
2.28.0.709.gb0816b6eb0-goog

2020-09-29 09:19:03

by Chao Yu

[permalink] [raw]
Subject: Re: [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 2020/9/29 16:47, Jaegeuk Kim wrote:
> On 09/29, Chao Yu wrote:
>> On 2020/9/29 16:23, Jaegeuk Kim wrote:
>>> I found a bug related to the number of page pointer allocation related to
>>> nr_cpages.
>>
>> Jaegeuk,
>>
>> If I didn't miss anything, you mean that nr_cpages could be larger
>> than nr_rpages, right? the problematic case here is lzo/lzo-rle:
>>
>> cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
>>
>> As we can't limit clen as we did for lz4/zstd:
>>
>> cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
>
> Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
> mem leak.
>
> Signed-off-by: Jaegeuk Kim <[email protected]>
> ---
> fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
> 1 file changed, 41 insertions(+), 26 deletions(-)
>
> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> index f086ac43ca825..ba2d4897744d8 100644
> --- a/fs/f2fs/compress.c
> +++ b/fs/f2fs/compress.c
> @@ -20,22 +20,20 @@
> static struct kmem_cache *cic_entry_slab;
> static struct kmem_cache *dic_entry_slab;
>
> -static void *page_array_alloc(struct inode *inode)
> +static void *page_array_alloc(struct inode *inode, int nr)
> {
> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> - unsigned int size = sizeof(struct page *) <<
> - F2FS_I(inode)->i_log_cluster_size;
> + unsigned int size = sizeof(struct page *) * nr;
>
> if (likely(size == sbi->page_array_slab_size))
> return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
> return f2fs_kzalloc(sbi, size, GFP_NOFS);
> }
>
> -static void page_array_free(struct inode *inode, void *pages)
> +static void page_array_free(struct inode *inode, void *pages, int nr)
> {
> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> - unsigned int size = sizeof(struct page *) <<
> - F2FS_I(inode)->i_log_cluster_size;
> + unsigned int size = sizeof(struct page *) * nr;
>
> if (!pages)
> return;
> @@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
> if (cc->rpages)
> return 0;
>
> - cc->rpages = page_array_alloc(cc->inode);
> + cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> return cc->rpages ? 0 : -ENOMEM;
> }
>
> void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
> {
> - page_array_free(cc->inode, cc->rpages);
> + page_array_free(cc->inode, cc->rpages, cc->cluster_size);
> cc->rpages = NULL;
> cc->nr_rpages = 0;
> cc->nr_cpages = 0;
> @@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> struct f2fs_inode_info *fi = F2FS_I(cc->inode);
> const struct f2fs_compress_ops *cops =
> f2fs_cops[fi->i_compress_algorithm];
> - unsigned int max_len, nr_cpages;
> + unsigned int max_len, new_nr_cpages;
> + struct page **new_cpages;
> int i, ret;
>
> trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
> @@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> max_len = COMPRESS_HEADER_SIZE + cc->clen;
> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>
> - cc->cpages = page_array_alloc(cc->inode);
> + cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);

Well, cc->nr_cpages will be set to cc->nr_rpages - 1 for zstd/lz4 cases, so
> this will make cpages allocation fall back to kmalloc, which can cause more
memory use.

Thanks,

> if (!cc->cpages) {
> ret = -ENOMEM;
> goto destroy_compress_ctx;
> @@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
> cc->cbuf->reserved[i] = cpu_to_le32(0);
>
> - nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> + new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> +
> + /* Now we're going to cut unnecessary tail pages */
> + new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> + if (new_cpages) {
> + ret = -ENOMEM;
> + goto out_vunmap_cbuf;
> + }
>
> /* zero out any unused part of the last page */
> memset(&cc->cbuf->cdata[cc->clen], 0,
> - (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
> + (new_nr_cpages * PAGE_SIZE) -
> + (cc->clen + COMPRESS_HEADER_SIZE));
>
> vm_unmap_ram(cc->cbuf, cc->nr_cpages);
> vm_unmap_ram(cc->rbuf, cc->cluster_size);
>
> - for (i = nr_cpages; i < cc->nr_cpages; i++) {
> + for (i = 0; i < cc->nr_cpages; i++) {
> + if (i < new_nr_cpages) {
> + new_cpages[i] = cc->cpages[i];
> + continue;
> + }
> f2fs_compress_free_page(cc->cpages[i]);
> cc->cpages[i] = NULL;
> }
> @@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> if (cops->destroy_compress_ctx)
> cops->destroy_compress_ctx(cc);
>
> - cc->nr_cpages = nr_cpages;
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> + cc->cpages = new_cpages;
> + cc->nr_cpages = new_nr_cpages;
>
> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> cc->clen, ret);
> @@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> if (cc->cpages[i])
> f2fs_compress_free_page(cc->cpages[i]);
> }
> - page_array_free(cc->inode, cc->cpages);
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> cc->cpages = NULL;
> destroy_compress_ctx:
> if (cops->destroy_compress_ctx)
> @@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> goto out_free_dic;
> }
>
> - dic->tpages = page_array_alloc(dic->inode);
> + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
> if (!dic->tpages) {
> ret = -ENOMEM;
> goto out_free_dic;
> @@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
> cic->inode = inode;
> atomic_set(&cic->pending_pages, cc->nr_cpages);
> - cic->rpages = page_array_alloc(cc->inode);
> + cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> if (!cic->rpages)
> goto out_put_cic;
>
> @@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> spin_unlock(&fi->i_size_lock);
>
> f2fs_put_rpages(cc);
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> + cc->cpages = NULL;
> f2fs_destroy_compress_ctx(cc);
> return 0;
>
> out_destroy_crypt:
> - page_array_free(cc->inode, cic->rpages);
> + page_array_free(cc->inode, cic->rpages, cc->cluster_size);
>
> for (--i; i >= 0; i--)
> fscrypt_finalize_bounce_page(&cc->cpages[i]);
> @@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> continue;
> f2fs_put_page(cc->cpages[i], 1);
> }
> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> + cc->cpages = NULL;
> out_put_cic:
> kmem_cache_free(cic_entry_slab, cic);
> out_put_dnode:
> @@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
> end_page_writeback(cic->rpages[i]);
> }
>
> - page_array_free(cic->inode, cic->rpages);
> + page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
> kmem_cache_free(cic_entry_slab, cic);
> }
>
> @@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
>
> err = f2fs_write_compressed_pages(cc, submitted,
> wbc, io_type);
> - page_array_free(cc->inode, cc->cpages);
> - cc->cpages = NULL;
> if (!err)
> return 0;
> f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
> @@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> if (!dic)
> return ERR_PTR(-ENOMEM);
>
> - dic->rpages = page_array_alloc(cc->inode);
> + dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> if (!dic->rpages) {
> kmem_cache_free(dic_entry_slab, dic);
> return ERR_PTR(-ENOMEM);
> @@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> dic->rpages[i] = cc->rpages[i];
> dic->nr_rpages = cc->cluster_size;
>
> - dic->cpages = page_array_alloc(dic->inode);
> + dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
> if (!dic->cpages)
> goto out_free;
>
> @@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> continue;
> f2fs_compress_free_page(dic->tpages[i]);
> }
> - page_array_free(dic->inode, dic->tpages);
> + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
> }
>
> if (dic->cpages) {
> @@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> continue;
> f2fs_compress_free_page(dic->cpages[i]);
> }
> - page_array_free(dic->inode, dic->cpages);
> + page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
> }
>
> - page_array_free(dic->inode, dic->rpages);
> + page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
> kmem_cache_free(dic_entry_slab, dic);
> }
>
>

2020-09-29 09:26:06

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 2020/9/29 17:15, Chao Yu wrote:
> On 2020/9/29 16:47, Jaegeuk Kim wrote:
>> On 09/29, Chao Yu wrote:
>>> On 2020/9/29 16:23, Jaegeuk Kim wrote:
>>>> I found a bug related to the number of page pointer allocation related to
>>>> nr_cpages.
>>>
>>> Jaegeuk,
>>>
>>> If I didn't miss anything, you mean that nr_cpages could be larger
>>> than nr_rpages, right? the problematic case here is lzo/lzo-rle:
>>>
>>> cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
>>>
>>> As we can't limit clen as we did for lz4/zstd:
>>>
>>> cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
>>
>> Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
>> mem leak.
>>
>> Signed-off-by: Jaegeuk Kim <[email protected]>
>> ---
>> fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
>> 1 file changed, 41 insertions(+), 26 deletions(-)
>>
>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>> index f086ac43ca825..ba2d4897744d8 100644
>> --- a/fs/f2fs/compress.c
>> +++ b/fs/f2fs/compress.c
>> @@ -20,22 +20,20 @@
>> static struct kmem_cache *cic_entry_slab;
>> static struct kmem_cache *dic_entry_slab;
>>
>> -static void *page_array_alloc(struct inode *inode)
>> +static void *page_array_alloc(struct inode *inode, int nr)
>> {
>> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>> - unsigned int size = sizeof(struct page *) <<
>> - F2FS_I(inode)->i_log_cluster_size;
>> + unsigned int size = sizeof(struct page *) * nr;
>>
>> if (likely(size == sbi->page_array_slab_size))
>> return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
>> return f2fs_kzalloc(sbi, size, GFP_NOFS);
>> }
>>
>> -static void page_array_free(struct inode *inode, void *pages)
>> +static void page_array_free(struct inode *inode, void *pages, int nr)
>> {
>> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>> - unsigned int size = sizeof(struct page *) <<
>> - F2FS_I(inode)->i_log_cluster_size;
>> + unsigned int size = sizeof(struct page *) * nr;
>>
>> if (!pages)
>> return;
>> @@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
>> if (cc->rpages)
>> return 0;
>>
>> - cc->rpages = page_array_alloc(cc->inode);
>> + cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>> return cc->rpages ? 0 : -ENOMEM;
>> }
>>
>> void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
>> {
>> - page_array_free(cc->inode, cc->rpages);
>> + page_array_free(cc->inode, cc->rpages, cc->cluster_size);
>> cc->rpages = NULL;
>> cc->nr_rpages = 0;
>> cc->nr_cpages = 0;
>> @@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>> struct f2fs_inode_info *fi = F2FS_I(cc->inode);
>> const struct f2fs_compress_ops *cops =
>> f2fs_cops[fi->i_compress_algorithm];
>> - unsigned int max_len, nr_cpages;
>> + unsigned int max_len, new_nr_cpages;
>> + struct page **new_cpages;
>> int i, ret;
>>
>> trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
>> @@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>> max_len = COMPRESS_HEADER_SIZE + cc->clen;
>> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>>
>> - cc->cpages = page_array_alloc(cc->inode);
>> + cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>
> Well, cc->nr_cpages will be set to cc->nr_rpages - 1 for zstd/lz4 cases, so
> this will make cpages allocation fall back to kmalloc, which can cause more
> memory use.

Could we handle cpages allocation for lzo/lzo-rle separately as:

force_xxx = is_lzo/lzo-rle_algorithm and is_cpages_array_allocation

page_array_alloc(, force_kmalloc)
page_array_free(, force_kfree)

Thanks,

>
> Thanks,
>
>> if (!cc->cpages) {
>> ret = -ENOMEM;
>> goto destroy_compress_ctx;
>> @@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>> for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
>> cc->cbuf->reserved[i] = cpu_to_le32(0);
>>
>> - nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>> + new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>> +
>> + /* Now we're going to cut unnecessary tail pages */
>> + new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>> + if (new_cpages) {
>> + ret = -ENOMEM;
>> + goto out_vunmap_cbuf;
>> + }
>>
>> /* zero out any unused part of the last page */
>> memset(&cc->cbuf->cdata[cc->clen], 0,
>> - (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
>> + (new_nr_cpages * PAGE_SIZE) -
>> + (cc->clen + COMPRESS_HEADER_SIZE));
>>
>> vm_unmap_ram(cc->cbuf, cc->nr_cpages);
>> vm_unmap_ram(cc->rbuf, cc->cluster_size);
>>
>> - for (i = nr_cpages; i < cc->nr_cpages; i++) {
>> + for (i = 0; i < cc->nr_cpages; i++) {
>> + if (i < new_nr_cpages) {
>> + new_cpages[i] = cc->cpages[i];
>> + continue;
>> + }
>> f2fs_compress_free_page(cc->cpages[i]);
>> cc->cpages[i] = NULL;
>> }
>> @@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>> if (cops->destroy_compress_ctx)
>> cops->destroy_compress_ctx(cc);
>>
>> - cc->nr_cpages = nr_cpages;
>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> + cc->cpages = new_cpages;
>> + cc->nr_cpages = new_nr_cpages;
>>
>> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>> cc->clen, ret);
>> @@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>> if (cc->cpages[i])
>> f2fs_compress_free_page(cc->cpages[i]);
>> }
>> - page_array_free(cc->inode, cc->cpages);
>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> cc->cpages = NULL;
>> destroy_compress_ctx:
>> if (cops->destroy_compress_ctx)
>> @@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
>> goto out_free_dic;
>> }
>>
>> - dic->tpages = page_array_alloc(dic->inode);
>> + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
>> if (!dic->tpages) {
>> ret = -ENOMEM;
>> goto out_free_dic;
>> @@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
>> cic->inode = inode;
>> atomic_set(&cic->pending_pages, cc->nr_cpages);
>> - cic->rpages = page_array_alloc(cc->inode);
>> + cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>> if (!cic->rpages)
>> goto out_put_cic;
>>
>> @@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> spin_unlock(&fi->i_size_lock);
>>
>> f2fs_put_rpages(cc);
>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> + cc->cpages = NULL;
>> f2fs_destroy_compress_ctx(cc);
>> return 0;
>>
>> out_destroy_crypt:
>> - page_array_free(cc->inode, cic->rpages);
>> + page_array_free(cc->inode, cic->rpages, cc->cluster_size);
>>
>> for (--i; i >= 0; i--)
>> fscrypt_finalize_bounce_page(&cc->cpages[i]);
>> @@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>> continue;
>> f2fs_put_page(cc->cpages[i], 1);
>> }
>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>> + cc->cpages = NULL;
>> out_put_cic:
>> kmem_cache_free(cic_entry_slab, cic);
>> out_put_dnode:
>> @@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
>> end_page_writeback(cic->rpages[i]);
>> }
>>
>> - page_array_free(cic->inode, cic->rpages);
>> + page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
>> kmem_cache_free(cic_entry_slab, cic);
>> }
>>
>> @@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
>>
>> err = f2fs_write_compressed_pages(cc, submitted,
>> wbc, io_type);
>> - page_array_free(cc->inode, cc->cpages);
>> - cc->cpages = NULL;
>> if (!err)
>> return 0;
>> f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
>> @@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>> if (!dic)
>> return ERR_PTR(-ENOMEM);
>>
>> - dic->rpages = page_array_alloc(cc->inode);
>> + dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>> if (!dic->rpages) {
>> kmem_cache_free(dic_entry_slab, dic);
>> return ERR_PTR(-ENOMEM);
>> @@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>> dic->rpages[i] = cc->rpages[i];
>> dic->nr_rpages = cc->cluster_size;
>>
>> - dic->cpages = page_array_alloc(dic->inode);
>> + dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
>> if (!dic->cpages)
>> goto out_free;
>>
>> @@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
>> continue;
>> f2fs_compress_free_page(dic->tpages[i]);
>> }
>> - page_array_free(dic->inode, dic->tpages);
>> + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
>> }
>>
>> if (dic->cpages) {
>> @@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
>> continue;
>> f2fs_compress_free_page(dic->cpages[i]);
>> }
>> - page_array_free(dic->inode, dic->cpages);
>> + page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
>> }
>>
>> - page_array_free(dic->inode, dic->rpages);
>> + page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
>> kmem_cache_free(dic_entry_slab, dic);
>> }
>>
>>
>
>
> _______________________________________________
> Linux-f2fs-devel mailing list
> [email protected]
> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
> .
>

2020-09-29 09:48:02

by Jaegeuk Kim

[permalink] [raw]
Subject: Re: [f2fs-dev] [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 09/29, Chao Yu wrote:
> On 2020/9/29 17:15, Chao Yu wrote:
> > On 2020/9/29 16:47, Jaegeuk Kim wrote:
> > > On 09/29, Chao Yu wrote:
> > > > On 2020/9/29 16:23, Jaegeuk Kim wrote:
> > > > > I found a bug related to the number of page pointer allocation related to
> > > > > nr_cpages.
> > > >
> > > > Jaegeuk,
> > > >
> > > > If I didn't miss anything, you mean that nr_cpages could be larger
> > > > than nr_rpages, right? the problematic case here is lzo/lzo-rle:
> > > >
> > > > cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
> > > >
> > > > As we can't limit clen as we did for lz4/zstd:
> > > >
> > > > cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
> > >
> > > Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
> > > mem leak.
> > >
> > > Signed-off-by: Jaegeuk Kim <[email protected]>
> > > ---
> > > fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
> > > 1 file changed, 41 insertions(+), 26 deletions(-)
> > >
> > > diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
> > > index f086ac43ca825..ba2d4897744d8 100644
> > > --- a/fs/f2fs/compress.c
> > > +++ b/fs/f2fs/compress.c
> > > @@ -20,22 +20,20 @@
> > > static struct kmem_cache *cic_entry_slab;
> > > static struct kmem_cache *dic_entry_slab;
> > > -static void *page_array_alloc(struct inode *inode)
> > > +static void *page_array_alloc(struct inode *inode, int nr)
> > > {
> > > struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> > > - unsigned int size = sizeof(struct page *) <<
> > > - F2FS_I(inode)->i_log_cluster_size;
> > > + unsigned int size = sizeof(struct page *) * nr;
> > > if (likely(size == sbi->page_array_slab_size))
> > > return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
> > > return f2fs_kzalloc(sbi, size, GFP_NOFS);
> > > }
> > > -static void page_array_free(struct inode *inode, void *pages)
> > > +static void page_array_free(struct inode *inode, void *pages, int nr)
> > > {
> > > struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
> > > - unsigned int size = sizeof(struct page *) <<
> > > - F2FS_I(inode)->i_log_cluster_size;
> > > + unsigned int size = sizeof(struct page *) * nr;
> > > if (!pages)
> > > return;
> > > @@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
> > > if (cc->rpages)
> > > return 0;
> > > - cc->rpages = page_array_alloc(cc->inode);
> > > + cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> > > return cc->rpages ? 0 : -ENOMEM;
> > > }
> > > void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
> > > {
> > > - page_array_free(cc->inode, cc->rpages);
> > > + page_array_free(cc->inode, cc->rpages, cc->cluster_size);
> > > cc->rpages = NULL;
> > > cc->nr_rpages = 0;
> > > cc->nr_cpages = 0;
> > > @@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > > struct f2fs_inode_info *fi = F2FS_I(cc->inode);
> > > const struct f2fs_compress_ops *cops =
> > > f2fs_cops[fi->i_compress_algorithm];
> > > - unsigned int max_len, nr_cpages;
> > > + unsigned int max_len, new_nr_cpages;
> > > + struct page **new_cpages;
> > > int i, ret;
> > > trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
> > > @@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > > max_len = COMPRESS_HEADER_SIZE + cc->clen;
> > > cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
> > > - cc->cpages = page_array_alloc(cc->inode);
> > > + cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
> >
> > Well, cc->nr_cpages will be set to cc->nr_rpages - 1 for zstd/lz4 cases, so
> > this will make cpages allocation fall back to kmalloc, which can cause more
> > memory use.
>
> Could we handle cpages allocation for lzo/lzo-rle separately as:
>
> force_xxx = is_lzo/lzo-rle_algorithm and is_cpages_array_allocation
>
> page_array_alloc(, force_kmalloc)
> page_array_free(, force_kfree)

What about:
if (likely(size <= sbi->page_array_slab_size))
return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);

>
> Thanks,
>
> >
> > Thanks,
> >
> > > if (!cc->cpages) {
> > > ret = -ENOMEM;
> > > goto destroy_compress_ctx;
> > > @@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > > for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
> > > cc->cbuf->reserved[i] = cpu_to_le32(0);
> > > - nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> > > + new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
> > > +
> > > + /* Now we're going to cut unnecessary tail pages */
> > > + new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
> > > + if (new_cpages) {
> > > + ret = -ENOMEM;
> > > + goto out_vunmap_cbuf;
> > > + }
> > > /* zero out any unused part of the last page */
> > > memset(&cc->cbuf->cdata[cc->clen], 0,
> > > - (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
> > > + (new_nr_cpages * PAGE_SIZE) -
> > > + (cc->clen + COMPRESS_HEADER_SIZE));
> > > vm_unmap_ram(cc->cbuf, cc->nr_cpages);
> > > vm_unmap_ram(cc->rbuf, cc->cluster_size);
> > > - for (i = nr_cpages; i < cc->nr_cpages; i++) {
> > > + for (i = 0; i < cc->nr_cpages; i++) {
> > > + if (i < new_nr_cpages) {
> > > + new_cpages[i] = cc->cpages[i];
> > > + continue;
> > > + }
> > > f2fs_compress_free_page(cc->cpages[i]);
> > > cc->cpages[i] = NULL;
> > > }
> > > @@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > > if (cops->destroy_compress_ctx)
> > > cops->destroy_compress_ctx(cc);
> > > - cc->nr_cpages = nr_cpages;
> > > + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> > > + cc->cpages = new_cpages;
> > > + cc->nr_cpages = new_nr_cpages;
> > > trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
> > > cc->clen, ret);
> > > @@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
> > > if (cc->cpages[i])
> > > f2fs_compress_free_page(cc->cpages[i]);
> > > }
> > > - page_array_free(cc->inode, cc->cpages);
> > > + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> > > cc->cpages = NULL;
> > > destroy_compress_ctx:
> > > if (cops->destroy_compress_ctx)
> > > @@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
> > > goto out_free_dic;
> > > }
> > > - dic->tpages = page_array_alloc(dic->inode);
> > > + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
> > > if (!dic->tpages) {
> > > ret = -ENOMEM;
> > > goto out_free_dic;
> > > @@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> > > cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
> > > cic->inode = inode;
> > > atomic_set(&cic->pending_pages, cc->nr_cpages);
> > > - cic->rpages = page_array_alloc(cc->inode);
> > > + cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> > > if (!cic->rpages)
> > > goto out_put_cic;
> > > @@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> > > spin_unlock(&fi->i_size_lock);
> > > f2fs_put_rpages(cc);
> > > + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> > > + cc->cpages = NULL;
> > > f2fs_destroy_compress_ctx(cc);
> > > return 0;
> > > out_destroy_crypt:
> > > - page_array_free(cc->inode, cic->rpages);
> > > + page_array_free(cc->inode, cic->rpages, cc->cluster_size);
> > > for (--i; i >= 0; i--)
> > > fscrypt_finalize_bounce_page(&cc->cpages[i]);
> > > @@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
> > > continue;
> > > f2fs_put_page(cc->cpages[i], 1);
> > > }
> > > + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
> > > + cc->cpages = NULL;
> > > out_put_cic:
> > > kmem_cache_free(cic_entry_slab, cic);
> > > out_put_dnode:
> > > @@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
> > > end_page_writeback(cic->rpages[i]);
> > > }
> > > - page_array_free(cic->inode, cic->rpages);
> > > + page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
> > > kmem_cache_free(cic_entry_slab, cic);
> > > }
> > > @@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
> > > err = f2fs_write_compressed_pages(cc, submitted,
> > > wbc, io_type);
> > > - page_array_free(cc->inode, cc->cpages);
> > > - cc->cpages = NULL;
> > > if (!err)
> > > return 0;
> > > f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
> > > @@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> > > if (!dic)
> > > return ERR_PTR(-ENOMEM);
> > > - dic->rpages = page_array_alloc(cc->inode);
> > > + dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
> > > if (!dic->rpages) {
> > > kmem_cache_free(dic_entry_slab, dic);
> > > return ERR_PTR(-ENOMEM);
> > > @@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
> > > dic->rpages[i] = cc->rpages[i];
> > > dic->nr_rpages = cc->cluster_size;
> > > - dic->cpages = page_array_alloc(dic->inode);
> > > + dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
> > > if (!dic->cpages)
> > > goto out_free;
> > > @@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> > > continue;
> > > f2fs_compress_free_page(dic->tpages[i]);
> > > }
> > > - page_array_free(dic->inode, dic->tpages);
> > > + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
> > > }
> > > if (dic->cpages) {
> > > @@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
> > > continue;
> > > f2fs_compress_free_page(dic->cpages[i]);
> > > }
> > > - page_array_free(dic->inode, dic->cpages);
> > > + page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
> > > }
> > > - page_array_free(dic->inode, dic->rpages);
> > > + page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
> > > kmem_cache_free(dic_entry_slab, dic);
> > > }
> > >
> >
> >
> > _______________________________________________
> > Linux-f2fs-devel mailing list
> > [email protected]
> > https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
> > .
> >

2020-09-29 12:11:30

by Chao Yu

[permalink] [raw]
Subject: Re: [f2fs-dev] [PATCH v2 1/2] f2fs: compress: introduce page array slab cache

On 2020/9/29 17:46, Jaegeuk Kim wrote:
> On 09/29, Chao Yu wrote:
>> On 2020/9/29 17:15, Chao Yu wrote:
>>> On 2020/9/29 16:47, Jaegeuk Kim wrote:
>>>> On 09/29, Chao Yu wrote:
>>>>> On 2020/9/29 16:23, Jaegeuk Kim wrote:
>>>>>> I found a bug related to the number of page pointer allocation related to
>>>>>> nr_cpages.
>>>>>
>>>>> Jaegeuk,
>>>>>
>>>>> If I didn't miss anything, you mean that nr_cpages could be larger
>>>>> than nr_rpages, right? the problematic case here is lzo/lzo-rle:
>>>>>
>>>>> cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
>>>>>
>>>>> As we can't limit clen as we did for lz4/zstd:
>>>>>
>>>>> cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
>>>>
>>>> Yes, I've seen some memory corruption in lzo test. Here is another patch to fix
>>>> mem leak.
>>>>
>>>> Signed-off-by: Jaegeuk Kim <[email protected]>
>>>> ---
>>>> fs/f2fs/compress.c | 67 ++++++++++++++++++++++++++++------------------
>>>> 1 file changed, 41 insertions(+), 26 deletions(-)
>>>>
>>>> diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
>>>> index f086ac43ca825..ba2d4897744d8 100644
>>>> --- a/fs/f2fs/compress.c
>>>> +++ b/fs/f2fs/compress.c
>>>> @@ -20,22 +20,20 @@
>>>> static struct kmem_cache *cic_entry_slab;
>>>> static struct kmem_cache *dic_entry_slab;
>>>> -static void *page_array_alloc(struct inode *inode)
>>>> +static void *page_array_alloc(struct inode *inode, int nr)
>>>> {
>>>> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>>>> - unsigned int size = sizeof(struct page *) <<
>>>> - F2FS_I(inode)->i_log_cluster_size;
>>>> + unsigned int size = sizeof(struct page *) * nr;
>>>> if (likely(size == sbi->page_array_slab_size))
>>>> return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
>>>> return f2fs_kzalloc(sbi, size, GFP_NOFS);
>>>> }
>>>> -static void page_array_free(struct inode *inode, void *pages)
>>>> +static void page_array_free(struct inode *inode, void *pages, int nr)
>>>> {
>>>> struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
>>>> - unsigned int size = sizeof(struct page *) <<
>>>> - F2FS_I(inode)->i_log_cluster_size;
>>>> + unsigned int size = sizeof(struct page *) * nr;
>>>> if (!pages)
>>>> return;
>>>> @@ -162,13 +160,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
>>>> if (cc->rpages)
>>>> return 0;
>>>> - cc->rpages = page_array_alloc(cc->inode);
>>>> + cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>>>> return cc->rpages ? 0 : -ENOMEM;
>>>> }
>>>> void f2fs_destroy_compress_ctx(struct compress_ctx *cc)
>>>> {
>>>> - page_array_free(cc->inode, cc->rpages);
>>>> + page_array_free(cc->inode, cc->rpages, cc->cluster_size);
>>>> cc->rpages = NULL;
>>>> cc->nr_rpages = 0;
>>>> cc->nr_cpages = 0;
>>>> @@ -602,7 +600,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>>>> struct f2fs_inode_info *fi = F2FS_I(cc->inode);
>>>> const struct f2fs_compress_ops *cops =
>>>> f2fs_cops[fi->i_compress_algorithm];
>>>> - unsigned int max_len, nr_cpages;
>>>> + unsigned int max_len, new_nr_cpages;
>>>> + struct page **new_cpages;
>>>> int i, ret;
>>>> trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
>>>> @@ -617,7 +616,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>>>> max_len = COMPRESS_HEADER_SIZE + cc->clen;
>>>> cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
>>>> - cc->cpages = page_array_alloc(cc->inode);
>>>> + cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
>>>
>>> Well, cc->nr_cpages will be set to cc->nr_rpages - 1 for zstd/lz4 cases, so
>>> this will make cpages allocation fall back to kmalloc, which can cause more
>>> memory use.
>>
>> Could we handle cpages allocation for lzo/lzo-rle separately as:
>>
>> force_xxx = is_lzo/lzo-rle_algorithm and is_cpages_array_allocation
>>
>> page_array_alloc(, force_kmalloc)
>> page_array_free(, force_kfree)
>
> What about:
> if (likely(size <= sbi->page_array_slab_size))
> return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);

Better, :)

Thanks,

>
>>
>> Thanks,
>>
>>>
>>> Thanks,
>>>
>>>> if (!cc->cpages) {
>>>> ret = -ENOMEM;
>>>> goto destroy_compress_ctx;
>>>> @@ -659,16 +658,28 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>>>> for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
>>>> cc->cbuf->reserved[i] = cpu_to_le32(0);
>>>> - nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>>>> + new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
>>>> +
>>>> + /* Now we're going to cut unnecessary tail pages */
>>>> + new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
>>>> + if (!new_cpages) {
>>>> + ret = -ENOMEM;
>>>> + goto out_vunmap_cbuf;
>>>> + }
>>>> /* zero out any unused part of the last page */
>>>> memset(&cc->cbuf->cdata[cc->clen], 0,
>>>> - (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
>>>> + (new_nr_cpages * PAGE_SIZE) -
>>>> + (cc->clen + COMPRESS_HEADER_SIZE));
>>>> vm_unmap_ram(cc->cbuf, cc->nr_cpages);
>>>> vm_unmap_ram(cc->rbuf, cc->cluster_size);
>>>> - for (i = nr_cpages; i < cc->nr_cpages; i++) {
>>>> + for (i = 0; i < cc->nr_cpages; i++) {
>>>> + if (i < new_nr_cpages) {
>>>> + new_cpages[i] = cc->cpages[i];
>>>> + continue;
>>>> + }
>>>> f2fs_compress_free_page(cc->cpages[i]);
>>>> cc->cpages[i] = NULL;
>>>> }
>>>> @@ -676,7 +687,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>>>> if (cops->destroy_compress_ctx)
>>>> cops->destroy_compress_ctx(cc);
>>>> - cc->nr_cpages = nr_cpages;
>>>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> + cc->cpages = new_cpages;
>>>> + cc->nr_cpages = new_nr_cpages;
>>>> trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
>>>> cc->clen, ret);
>>>> @@ -691,7 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
>>>> if (cc->cpages[i])
>>>> f2fs_compress_free_page(cc->cpages[i]);
>>>> }
>>>> - page_array_free(cc->inode, cc->cpages);
>>>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> cc->cpages = NULL;
>>>> destroy_compress_ctx:
>>>> if (cops->destroy_compress_ctx)
>>>> @@ -730,7 +743,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
>>>> goto out_free_dic;
>>>> }
>>>> - dic->tpages = page_array_alloc(dic->inode);
>>>> + dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
>>>> if (!dic->tpages) {
>>>> ret = -ENOMEM;
>>>> goto out_free_dic;
>>>> @@ -1203,7 +1216,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>> cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
>>>> cic->inode = inode;
>>>> atomic_set(&cic->pending_pages, cc->nr_cpages);
>>>> - cic->rpages = page_array_alloc(cc->inode);
>>>> + cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>>>> if (!cic->rpages)
>>>> goto out_put_cic;
>>>> @@ -1297,11 +1310,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>> spin_unlock(&fi->i_size_lock);
>>>> f2fs_put_rpages(cc);
>>>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> + cc->cpages = NULL;
>>>> f2fs_destroy_compress_ctx(cc);
>>>> return 0;
>>>> out_destroy_crypt:
>>>> - page_array_free(cc->inode, cic->rpages);
>>>> + page_array_free(cc->inode, cic->rpages, cc->cluster_size);
>>>> for (--i; i >= 0; i--)
>>>> fscrypt_finalize_bounce_page(&cc->cpages[i]);
>>>> @@ -1310,6 +1325,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
>>>> continue;
>>>> f2fs_put_page(cc->cpages[i], 1);
>>>> }
>>>> + page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
>>>> + cc->cpages = NULL;
>>>> out_put_cic:
>>>> kmem_cache_free(cic_entry_slab, cic);
>>>> out_put_dnode:
>>>> @@ -1345,7 +1362,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
>>>> end_page_writeback(cic->rpages[i]);
>>>> }
>>>> - page_array_free(cic->inode, cic->rpages);
>>>> + page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
>>>> kmem_cache_free(cic_entry_slab, cic);
>>>> }
>>>> @@ -1442,8 +1459,6 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
>>>> err = f2fs_write_compressed_pages(cc, submitted,
>>>> wbc, io_type);
>>>> - page_array_free(cc->inode, cc->cpages);
>>>> - cc->cpages = NULL;
>>>> if (!err)
>>>> return 0;
>>>> f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
>>>> @@ -1468,7 +1483,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>>>> if (!dic)
>>>> return ERR_PTR(-ENOMEM);
>>>> - dic->rpages = page_array_alloc(cc->inode);
>>>> + dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
>>>> if (!dic->rpages) {
>>>> kmem_cache_free(dic_entry_slab, dic);
>>>> return ERR_PTR(-ENOMEM);
>>>> @@ -1487,7 +1502,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
>>>> dic->rpages[i] = cc->rpages[i];
>>>> dic->nr_rpages = cc->cluster_size;
>>>> - dic->cpages = page_array_alloc(dic->inode);
>>>> + dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
>>>> if (!dic->cpages)
>>>> goto out_free;
>>>> @@ -1522,7 +1537,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
>>>> continue;
>>>> f2fs_compress_free_page(dic->tpages[i]);
>>>> }
>>>> - page_array_free(dic->inode, dic->tpages);
>>>> + page_array_free(dic->inode, dic->tpages, dic->cluster_size);
>>>> }
>>>> if (dic->cpages) {
>>>> @@ -1531,10 +1546,10 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
>>>> continue;
>>>> f2fs_compress_free_page(dic->cpages[i]);
>>>> }
>>>> - page_array_free(dic->inode, dic->cpages);
>>>> + page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
>>>> }
>>>> - page_array_free(dic->inode, dic->rpages);
>>>> + page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
>>>> kmem_cache_free(dic_entry_slab, dic);
>>>> }
>>>>
>>>
>>>
>>> _______________________________________________
>>> Linux-f2fs-devel mailing list
>>> [email protected]
>>> https://lists.sourceforge.net/lists/listinfo/linux-f2fs-devel
>>> .
>>>
> .
>