Subject: [PATCH v2] sunrpc: clean-up cache downcall

We can simplify the code around cache_downcall() by unifying memory
allocations using kvmalloc(). This has the benefit of getting rid of
cache_slow_downcall() (and queue_io_mutex), and it also matches the
userland allocation size and limits.
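
For reference, both the old and the new path funnel into the existing
cache_do_downcall() helper, which copies the userland buffer,
NUL-terminates it and hands it to ->cache_parse(); that is why the new
path allocates count + 1 bytes. Roughly (paraphrased here for context,
not part of this diff):

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	/* pull the whole write into the kernel buffer */
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;

	/* cache parsers expect a NUL-terminated string */
	kaddr[count] = '\0';

	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}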

Signed-off-by: Roberto Bergantinos Corpas <[email protected]>
---
net/sunrpc/cache.c | 41 +++++++++++------------------------------
1 file changed, 11 insertions(+), 30 deletions(-)

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20c93b68505e..1a2c1c44bb00 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -778,7 +778,6 @@ void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head	list;
@@ -906,44 +905,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 	return ret;
 }
 
-static ssize_t cache_slow_downcall(const char __user *buf,
-				   size_t count, struct cache_detail *cd)
-{
-	static char write_buf[32768]; /* protected by queue_io_mutex */
-	ssize_t ret = -EINVAL;
-
-	if (count >= sizeof(write_buf))
-		goto out;
-	mutex_lock(&queue_io_mutex);
-	ret = cache_do_downcall(write_buf, buf, count, cd);
-	mutex_unlock(&queue_io_mutex);
-out:
-	return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
 			      const char __user *buf,
 			      size_t count, struct cache_detail *cd)
 {
-	struct page *page;
-	char *kaddr;
+	char *write_buf;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_SIZE)
-		goto out_slow;
+	if (count >= 32768) { /* 32k is the max userland buffer, but let's check anyway */
+		ret = -EINVAL;
+		goto out;
+	}
 
-	page = find_or_create_page(mapping, 0, GFP_KERNEL);
-	if (!page)
-		goto out_slow;
+	write_buf = kvmalloc(count + 1, GFP_KERNEL);
+	if (!write_buf)
+		goto out;
 
-	kaddr = kmap(page);
-	ret = cache_do_downcall(kaddr, buf, count, cd);
-	kunmap(page);
-	unlock_page(page);
-	put_page(page);
+	ret = cache_do_downcall(write_buf, buf, count, cd);
+	kvfree(write_buf);
+out:
 	return ret;
-out_slow:
-	return cache_slow_downcall(buf, count, cd);
 }
 
 static ssize_t cache_write(struct file *filp, const char __user *buf,
--
2.21.0


2020-11-27 22:44:03
by Chuck Lever III

Subject: Re: [PATCH v2] sunrpc: clean-up cache downcall


> On Nov 27, 2020, at 1:38 PM, Roberto Bergantinos Corpas <[email protected]> wrote:
>
> We can simplify the code around cache_downcall() by unifying memory
> allocations using kvmalloc(). This has the benefit of getting rid of
> cache_slow_downcall() (and queue_io_mutex), and it also matches the
> userland allocation size and limits.
>
> Signed-off-by: Roberto Bergantinos Corpas <[email protected]>


Thanks, Roberto.

I see from linux-nfs history that Bruce explicitly asked for
this change, so I've applied it for the next merge window. See the
cel-next topic branch in:

git://git.linux-nfs.org/projects/cel/cel-2.6.git

or

https://git.linux-nfs.org/?p=cel/cel-2.6.git;a=summary

--
Chuck Lever