2022-07-04 15:26:05

by Fabio M. De Francesco

Subject: [PATCH v5 0/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

This small series replaces kmap() with kmap_local_page() in btrfs/zstd.c.
The actual conversion is done in patch 2/2.

Patch 1/2 is a prerequisite for that conversion; above all, it makes the
prototypes of __kunmap_{local,atomic}() conform to their actual semantics.
Since those functions do not change the memory pointed to by their arguments,
change the type of those arguments to pointers to const void.
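
Note why 2/2 depends on 1/2: zstd keeps the address of the currently mapped
input page in workspace->in_buf.src, which the zstd API declares as a pointer
to const, so passing it to kunmap_local() only compiles cleanly once
__kunmap_local() accepts "const void *". A minimal sketch of the pattern
(illustrative only, not taken from the diffs):

        const void *src = kmap_local_page(in_page);  /* as in workspace->in_buf.src */
        /* ... hand the mapped data to the compressor ... */
        kunmap_local(src);  /* needs the "const void *" prototype, no cast required */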

v4 -> v5: Use plain page_address() for pages which cannot come from Highmem
(instead of kmapping them); remove unnecessary initialisations to NULL
(thanks to Ira Weiny).

v3 -> v4: Resend and add linux-mm to the list of recipients (thanks to
Andrew Morton).

Fabio M. De Francesco (2):
highmem: Make __kunmap_{local,atomic}() take "const void *"
btrfs: Replace kmap() with kmap_local_page() in zstd.c

arch/parisc/include/asm/cacheflush.h | 6 ++---
arch/parisc/kernel/cache.c | 2 +-
fs/btrfs/zstd.c | 34 ++++++++++++----------------
include/linux/highmem-internal.h | 10 ++++----
mm/highmem.c | 2 +-
5 files changed, 24 insertions(+), 30 deletions(-)

--
2.36.1


2022-07-04 15:26:16

by Fabio M. De Francesco

Subject: [PATCH v5 2/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

The use of kmap() is being deprecated in favor of kmap_local_page(). With
kmap_local_page(), the mapping is per thread, CPU local and not globally
visible.

Therefore, use kmap_local_page() / kunmap_local() in zstd.c because in this
file the mappings are per thread and are not visible in other contexts. In
the meantime, use plain page_address() on pages allocated with the GFP_NOFS
flag instead of calling kmap*() on them (since they are always allocated
from ZONE_NORMAL).
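
As a minimal illustrative sketch (not taken from the diff below), the two
patterns used after this change are roughly:

        /* input page from the page cache, may be in highmem: short-lived local mapping */
        void *in = kmap_local_page(in_page);
        /* ... read the input data ... */
        kunmap_local(in);                       /* unmap before leaving this context */

        /* output page from alloc_page(GFP_NOFS): always lowmem, already mapped */
        workspace->out_buf.dst = page_address(out_page);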

Tested with xfstests on a QEMU + KVM 32-bit VM with 4GB of RAM and
booting a kernel with HIGHMEM64G enabled.

Cc: Filipe Manana <[email protected]>
Suggested-by: Ira Weiny <[email protected]>
Signed-off-by: Fabio M. De Francesco <[email protected]>
---
fs/btrfs/zstd.c | 34 ++++++++++++++--------------------
1 file changed, 14 insertions(+), 20 deletions(-)

diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index 0fe31a6f6e68..78e0272e770e 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -403,7 +403,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);

@@ -415,7 +415,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

@@ -450,9 +450,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
if (workspace->out_buf.pos == workspace->out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- kunmap(out_page);
if (nr_pages == nr_dest_pages) {
- out_page = NULL;
ret = -E2BIG;
goto out;
}
@@ -462,7 +460,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out,
PAGE_SIZE);
@@ -477,15 +475,15 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
/* Check if we need more input */
if (workspace->in_buf.pos == workspace->in_buf.size) {
tot_in += PAGE_SIZE;
- kunmap(in_page);
+ kunmap_local(workspace->in_buf.src);
put_page(in_page);
-
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
- workspace->in_buf.src = kmap(in_page);
+ workspace->in_buf.src = kmap_local_page(in_page);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ workspace->out_buf.dst = page_address(out_page);
}
}
while (1) {
@@ -510,9 +508,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,

tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
- kunmap(out_page);
if (nr_pages == nr_dest_pages) {
- out_page = NULL;
ret = -E2BIG;
goto out;
}
@@ -522,7 +518,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
goto out;
}
pages[nr_pages++] = out_page;
- workspace->out_buf.dst = kmap(out_page);
+ workspace->out_buf.dst = page_address(out_page);
workspace->out_buf.pos = 0;
workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
@@ -537,13 +533,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = tot_out;
out:
*out_pages = nr_pages;
- /* Cleanup */
- if (in_page) {
- kunmap(in_page);
+ if (workspace->in_buf.src) {
+ kunmap_local(workspace->in_buf.src);
put_page(in_page);
}
- if (out_page)
- kunmap(out_page);
return ret;
}

@@ -567,7 +560,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
goto done;
}

- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);

@@ -603,14 +596,15 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
break;

if (workspace->in_buf.pos == workspace->in_buf.size) {
- kunmap(pages_in[page_in_index++]);
+ kunmap_local(workspace->in_buf.src);
+ page_in_index++;
if (page_in_index >= total_pages_in) {
workspace->in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
- workspace->in_buf.src = kmap(pages_in[page_in_index]);
+ workspace->in_buf.src = kmap_local_page(pages_in[page_in_index]);
workspace->in_buf.pos = 0;
workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
@@ -619,7 +613,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
zero_fill_bio(cb->orig_bio);
done:
if (workspace->in_buf.src)
- kunmap(pages_in[page_in_index]);
+ kunmap_local(workspace->in_buf.src);
return ret;
}

--
2.36.1

2022-07-04 15:37:38

by Fabio M. De Francesco

Subject: [PATCH v5 1/2] highmem: Make __kunmap_{local,atomic}() take "const void *"

__kunmap_{local,atomic}() currently take pointers to void. However, this
is semantically incorrect, since these functions do not change the memory
their arguments point to.

Therefore, make these semantics explicit by modifying the
__kunmap_{local,atomic}() prototypes to take pointers to const void.

As a side effect, compilers will likely produce more efficient code.
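
For callers this also means that a mapping held behind a pointer to const can
be passed straight to kunmap_local() / kunmap_atomic() without a cast, e.g.
(illustrative sketch only):

        const void *vaddr = kmap_local_page(page);
        /* ... read-only access to the mapped page ... */
        kunmap_local(vaddr);  /* no "discards const qualifier" warning anymore */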

Cc: Andrew Morton <[email protected]>
Suggested-by: David Sterba <[email protected]>
Suggested-by: Ira Weiny <[email protected]>
Reviewed-by: Ira Weiny <[email protected]>
Signed-off-by: Fabio M. De Francesco <[email protected]>
---
arch/parisc/include/asm/cacheflush.h | 6 +++---
arch/parisc/kernel/cache.c | 2 +-
include/linux/highmem-internal.h | 10 +++++-----
mm/highmem.c | 2 +-
4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 8d03b3b26229..0bdee6724132 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -22,7 +22,7 @@ void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
-void flush_kernel_dcache_page_asm(void *);
+void flush_kernel_dcache_page_asm(const void *addr);
void flush_kernel_icache_page(void *);

/* Cache flush operations */
@@ -31,7 +31,7 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

-void flush_kernel_dcache_page_addr(void *addr);
+void flush_kernel_dcache_page_addr(const void *addr);

#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
@@ -75,7 +75,7 @@ void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);

#define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(void *addr)
+static inline void kunmap_flush_on_unmap(const void *addr)
{
flush_kernel_dcache_page_addr(addr);
}
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a9bc578e4c52..993999a65e54 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -549,7 +549,7 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

-void flush_kernel_dcache_page_addr(void *addr)
+void flush_kernel_dcache_page_addr(const void *addr)
{
unsigned long flags;

diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index cddb42ff0473..034b1106d022 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -8,7 +8,7 @@
#ifdef CONFIG_KMAP_LOCAL
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
-void kunmap_local_indexed(void *vaddr);
+void kunmap_local_indexed(const void *vaddr);
void kmap_local_fork(struct task_struct *tsk);
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);
@@ -89,7 +89,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return __kmap_local_pfn_prot(pfn, kmap_prot);
}

-static inline void __kunmap_local(void *vaddr)
+static inline void __kunmap_local(const void *vaddr)
{
kunmap_local_indexed(vaddr);
}
@@ -121,7 +121,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
return __kmap_local_pfn_prot(pfn, kmap_prot);
}

-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_atomic(const void *addr)
{
kunmap_local_indexed(addr);
pagefault_enable();
@@ -197,7 +197,7 @@ static inline void *kmap_local_pfn(unsigned long pfn)
return kmap_local_page(pfn_to_page(pfn));
}

-static inline void __kunmap_local(void *addr)
+static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);
@@ -224,7 +224,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn)
return kmap_atomic(pfn_to_page(pfn));
}

-static inline void __kunmap_atomic(void *addr)
+static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
kunmap_flush_on_unmap(addr);
diff --git a/mm/highmem.c b/mm/highmem.c
index 1a692997fac4..e32083e4ce0d 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -561,7 +561,7 @@ void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
}
EXPORT_SYMBOL(__kmap_local_page_prot);

-void kunmap_local_indexed(void *vaddr)
+void kunmap_local_indexed(const void *vaddr)
{
unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
pte_t *kmap_pte;
--
2.36.1

2022-07-05 19:09:32

by Ira Weiny

Subject: Re: [PATCH v5 2/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

On Mon, Jul 04, 2022 at 05:23:22PM +0200, Fabio M. De Francesco wrote:
> The use of kmap() is being deprecated in favor of kmap_local_page(). With
> kmap_local_page(), the mapping is per thread, CPU local and not globally
> visible.
>
> Therefore, use kmap_local_page() / kunmap_local() in zstd.c because in this
> file the mappings are per thread and are not visible in other contexts. In
> the meantime, use plain page_address() on pages allocated with the GFP_NOFS
> flag instead of calling kmap*() on them (since they are always allocated
> from ZONE_NORMAL).
>
> Tested with xfstests on a QEMU + KVM 32-bit VM with 4GB of RAM and
> booting a kernel with HIGHMEM64G enabled.
>
> Cc: Filipe Manana <[email protected]>
> Suggested-by: Ira Weiny <[email protected]>
> Signed-off-by: Fabio M. De Francesco <[email protected]>
> ---
> fs/btrfs/zstd.c | 34 ++++++++++++++--------------------
> 1 file changed, 14 insertions(+), 20 deletions(-)
>
> diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
> index 0fe31a6f6e68..78e0272e770e 100644
> --- a/fs/btrfs/zstd.c
> +++ b/fs/btrfs/zstd.c
> @@ -403,7 +403,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
>
> /* map in the first page of input data */
> in_page = find_get_page(mapping, start >> PAGE_SHIFT);
> - workspace->in_buf.src = kmap(in_page);
> + workspace->in_buf.src = kmap_local_page(in_page);
> workspace->in_buf.pos = 0;
> workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
>
> @@ -415,7 +415,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> goto out;
> }
> pages[nr_pages++] = out_page;
> - workspace->out_buf.dst = kmap(out_page);
> + workspace->out_buf.dst = page_address(out_page);
> workspace->out_buf.pos = 0;
> workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
>
> @@ -450,9 +450,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> if (workspace->out_buf.pos == workspace->out_buf.size) {
> tot_out += PAGE_SIZE;
> max_out -= PAGE_SIZE;
> - kunmap(out_page);
> if (nr_pages == nr_dest_pages) {
> - out_page = NULL;
> ret = -E2BIG;
> goto out;
> }
> @@ -462,7 +460,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> goto out;
> }
> pages[nr_pages++] = out_page;
> - workspace->out_buf.dst = kmap(out_page);
> + workspace->out_buf.dst = page_address(out_page);
> workspace->out_buf.pos = 0;
> workspace->out_buf.size = min_t(size_t, max_out,
> PAGE_SIZE);
> @@ -477,15 +475,15 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> /* Check if we need more input */
> if (workspace->in_buf.pos == workspace->in_buf.size) {
> tot_in += PAGE_SIZE;
> - kunmap(in_page);
> + kunmap_local(workspace->in_buf.src);
> put_page(in_page);
> -
> start += PAGE_SIZE;
> len -= PAGE_SIZE;
> in_page = find_get_page(mapping, start >> PAGE_SHIFT);
> - workspace->in_buf.src = kmap(in_page);
> + workspace->in_buf.src = kmap_local_page(in_page);
> workspace->in_buf.pos = 0;
> workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
> + workspace->out_buf.dst = page_address(out_page);

Why is this needed?

The rest looks good,
Ira

[snip]

2022-07-06 10:43:33

by Fabio M. De Francesco

Subject: Re: [PATCH v5 2/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

On Tuesday 5 July 2022 21:04:04 CEST Ira Weiny wrote:
> On Mon, Jul 04, 2022 at 05:23:22PM +0200, Fabio M. De Francesco wrote:
> > The use of kmap() is being deprecated in favor of kmap_local_page(). With
> > kmap_local_page(), the mapping is per thread, CPU local and not globally
> > visible.
> >
[snip]
> > @@ -477,15 +475,15 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> > /* Check if we need more input */
> > if (workspace->in_buf.pos == workspace->in_buf.size) {
> > tot_in += PAGE_SIZE;
> > - kunmap(in_page);
> > + kunmap_local(workspace->in_buf.src);
> > put_page(in_page);
> > -
> > start += PAGE_SIZE;
> > len -= PAGE_SIZE;
> > in_page = find_get_page(mapping, start >> PAGE_SHIFT);
> > - workspace->in_buf.src = kmap(in_page);
> > + workspace->in_buf.src = kmap_local_page(in_page);
> > workspace->in_buf.pos = 0;
> > workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
> > + workspace->out_buf.dst = page_address(out_page);
>
> Why is this needed?

Sorry. This initialization is not needed at all.
I probably made a mistake while copy-pasting snippets of code.

I'm going to send the fifth version of this series ASAP.

> The rest looks good,

Thanks,

Fabio

> Ira
>
> [snip]
>



2022-07-06 10:59:46

by Fabio M. De Francesco

Subject: Re: [PATCH v5 2/2] btrfs: Replace kmap() with kmap_local_page() in zstd.c

On Wednesday 6 July 2022 12:38:29 CEST Fabio M. De Francesco wrote:
> On Tuesday 5 July 2022 21:04:04 CEST Ira Weiny wrote:
> > On Mon, Jul 04, 2022 at 05:23:22PM +0200, Fabio M. De Francesco wrote:
> > > The use of kmap() is being deprecated in favor of kmap_local_page(). With
> > > kmap_local_page(), the mapping is per thread, CPU local and not globally
> > > visible.
> > >
[snip]
> > > @@ -477,15 +475,15 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
> > > /* Check if we need more input */
> > > if (workspace->in_buf.pos == workspace->in_buf.size) {
> > > tot_in += PAGE_SIZE;
> > > - kunmap(in_page);
> > > + kunmap_local(workspace->in_buf.src);
> > > put_page(in_page);
> > > -
> > > start += PAGE_SIZE;
> > > len -= PAGE_SIZE;
> > > in_page = find_get_page(mapping, start >> PAGE_SHIFT);
> > > - workspace->in_buf.src = kmap(in_page);
> > > + workspace->in_buf.src = kmap_local_page(in_page);
> > > workspace->in_buf.pos = 0;
> > > workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
> > > + workspace->out_buf.dst = page_address(out_page);
> >
> > Why is this needed?
>
> Sorry. This initialization is not needed at all.
> I probably made a mistake while copy-pasting snippets of code.
>
> I'm going to send the fifth version of this series ASAP.

"fifth" -> "sixth".

>
> > The rest looks good,
>
> Thanks,
>
> Fabio
>
> > Ira
> >
> > [snip]
> >
>
>
>
>