2022-07-06 11:25:17

by Fabio M. De Francesco

Subject: [PATCH v6 0/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

This is a small series whose purpose is to replace kmap() with
kmap_local_page() in btrfs/zstd.c. The replacement itself is done in
patch 2/2.

Patch 1/2 is a prerequisite for that replacement; above all else, it makes
the prototypes of __kunmap_{local,atomic}() conform to their actual
semantics. Since those functions do not modify the memory pointed to by
their arguments, change the type of those arguments to pointers to
const void.
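
For context, the gist of patch 1/2 (not reproduced in this excerpt) is the
constification of the unmap helpers' address parameter. A rough before/after
sketch, simplified from include/linux/highmem-internal.h (the exact
signatures differ between the HIGHMEM and !HIGHMEM variants):

    /* before: the address is passed as a non-const pointer */
    static inline void __kunmap_local(void *vaddr);
    static inline void __kunmap_atomic(void *addr);

    /* after: the helpers never write through the pointer they are given */
    static inline void __kunmap_local(const void *vaddr);
    static inline void __kunmap_atomic(const void *addr);

The real patch also propagates the const qualifier to the callees in
mm/highmem.c and to the parisc cache-flush helper, as the diffstat below
suggests.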

v5 -> v6: Delete an unnecessary assignment in 2/2 (thanks to Ira Weiny).

v4 -> v5: Use plain page_address() for pages which cannot come from Highmem
(instead of kmapping them); remove unnecessary initializations to NULL
in 2/2 (thanks to Ira Weiny).

v3 -> v4: Resend and add linux-mm to the list of recipients (thanks to
Andrew Morton).

Fabio M. De Francesco (2):
highmem: Make __kunmap_{local,atomic}() take "const void *"
btrfs: Replace kmap() with kmap_local_page() in zstd.c

 arch/parisc/include/asm/cacheflush.h |  6 ++---
 arch/parisc/kernel/cache.c           |  2 +-
 fs/btrfs/zstd.c                      | 33 +++++++++++-----------------
 include/linux/highmem-internal.h     | 10 ++++-----
 mm/highmem.c                         |  2 +-
 5 files changed, 23 insertions(+), 30 deletions(-)

--
2.36.1


2022-07-06 11:26:23

by Fabio M. De Francesco

Subject: [PATCH v6 2/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

The use of kmap() is being deprecated in favor of kmap_local_page(). With
kmap_local_page(), the mapping is per thread, CPU-local, and not globally
visible.

Therefore, use kmap_local_page() / kunmap_local() in zstd.c, because in this
file the mappings are per thread and are not visible in other contexts. While
at it, use plain page_address() on pages allocated with the GFP_NOFS flag
instead of calling kmap*() on them, since such pages are always allocated
from ZONE_NORMAL and never come from highmem.
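
As a general illustration of the resulting pattern (a minimal sketch, not
code taken from the patch; "mapping" and "index" stand in for the caller's
address_space and page offset):

    struct page *out_page, *in_page;
    char *out, *in;

    /* allocated without __GFP_HIGHMEM: always lowmem, no mapping needed */
    out_page = alloc_page(GFP_NOFS);
    out = page_address(out_page);

    /* a page cache page may live in highmem: map it thread-locally */
    in_page = find_get_page(mapping, index);
    in = kmap_local_page(in_page);
    /* ... use "in"; the mapping is valid only in this context ... */
    kunmap_local(in);
    put_page(in_page);

Note that kunmap_local() takes the kernel virtual address returned by
kmap_local_page(), not the struct page that kunmap() takes; this is why the
cleanup path below checks and unmaps workspace->in_buf.src rather than
in_page, and why the kunmap(out_page) calls can be dropped entirely
(page_address() needs no unmapping).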

Tested with xfstests on a QEMU + KVM 32-bit VM with 4GB of RAM, booting a
kernel with HIGHMEM64G enabled.
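
A plausible minimal configuration for such a test (an illustration, not the
exact setup used for this submission):

    # 32-bit x86 guest kernel with highmem enabled
    CONFIG_X86_32=y
    CONFIG_HIGHMEM64G=y        # a PAE kernel with CONFIG_HIGHMEM=y

    # 4 GB of RAM under QEMU + KVM
    qemu-system-i386 -enable-kvm -m 4096 ...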

Cc: Filipe Manana <[email protected]>
Suggested-by: Ira Weiny <[email protected]>
Signed-off-by: Fabio M. De Francesco <[email protected]>
---
fs/btrfs/zstd.c | 33 +++++++++++++--------------------
1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 0fe31a6f6e68..35a0224d4eb7 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -403,7 +403,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

@@ -415,7 +415,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

@@ -450,9 +450,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- kunmap(out_page);
if (nr_pages == nr_dest_pages) {
- out_page = NULL;
ret = -E2BIG;
goto out;
}
@@ -462,7 +460,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out,
PAGE_SIZE);
@@ -477,13 +475,12 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we need more input */
if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
- kunmap(in_page);
+ kunmap_local(workspace->in_buf.src);
put_page(in_page);
-
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
@@ -510,9 +507,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- kunmap(out_page);
if (nr_pages == nr_dest_pages) {
- out_page = NULL;
ret = -E2BIG;
goto out;
}
@@ -522,7 +517,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
@@ -537,13 +532,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = tot_out;
out:
*out_pages = nr_pages;
- /* Cleanup */
- if (in_page) {
- kunmap(in_page);
+ if (workspace->in_buf.src) {
+ kunmap_local(workspace->in_buf.src);
put_page(in_page);
}
- if (out_page)
- kunmap(out_page);
return ret;
}

@@ -567,7 +559,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}

- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

@@ -603,14 +595,15 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
break;

if (workspace->in_buf.pos == workspace->in_buf.size) {
- kunmap(pages_in[page_in_index++]);
+ kunmap_local(workspace->in_buf.src);
+ page_in_index++;
if (page_in_index >= total_pages_in) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
@@ -619,7 +612,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
zero_fill_bio(cb->orig_bio);
done:
if (workspace->in_buf.src)
- kunmap(pages_in[page_in_index]);
+ kunmap_local(workspace->in_buf.src);
return ret;
}

--
2.36.1

2022-07-07 22:37:26

by David Sterba

Subject: Re: [PATCH v6 0/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

On Wed, Jul 06, 2022 at 01:15:18PM +0200, Fabio M. De Francesco wrote:
> This is a small series whose purpose is to replace kmap() with
> kmap_local_page() in btrfs/zstd.c. The replacement itself is done in
> patch 2/2.
>
> Patch 1/2 is a prerequisite for that replacement; above all else, it makes
> the prototypes of __kunmap_{local,atomic}() conform to their actual
> semantics. Since those functions do not modify the memory pointed to by
> their arguments, change the type of those arguments to pointers to
> const void.
>
> v5 -> v6: Delete an unnecessary assignment in 2/2 (thanks to Ira Weiny).
>
> v4 -> v5: Use plain page_address() for pages which cannot come from Highmem
> (instead of kmapping them); remove unnecessary initializations to NULL
> in 2/2 (thanks to Ira Weiny).
>
> v3 -> v4: Resend and add linux-mm to the list of recipients (thanks to
> Andrew Morton).
>
> Fabio M. De Francesco (2):
> highmem: Make __kunmap_{local,atomic}() take "const void *"
> btrfs: Replace kmap() with kmap_local_page() in zstd.c

Added to the kmap patch queue, thanks. With all the other conversions
there are 5 patches:

highmem: Make __kunmap_{local,atomic}() take const void pointer
btrfs: zstd: replace kmap() with kmap_local_page()
btrfs: zlib: replace kmap() with kmap_local_page() in zlib_compress_pages()
btrfs: zlib: replace kmap() with kmap_local_page() in zlib_decompress_bio()
btrfs: replace kmap_atomic() with kmap_local_page()

and there are no kmap or kmap_atomic calls left in fs/btrfs, scheduled for 5.20.