If a task fails to get the write lock in the call to xprt_connect(), it
will be queued on xprt->sending. In that case, it is possible for the
request to be transmitted before the call to call_connect_status(), in
which case it needs to be handled by call_transmit_status() instead.
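
For context, a rough sketch of the interleaving being handled here (the
exact sequence is an assumption based on the description above, not a
trace of the actual code):

/*
 * Assumed interleaving (simplified):
 *
 * 1. Task B calls xprt_connect(), fails to get the transport write
 *    lock, and is queued on xprt->sending. Its request is already
 *    sitting on the transmit queue with RPC_TASK_NEED_XMIT set.
 * 2. The task that does hold the lock drains the transmit queue and
 *    sends B's request, clearing RPC_TASK_NEED_XMIT.
 * 3. B wakes up in call_connect_status(). With the check added below,
 *    it notices that RPC_TASK_NEED_XMIT is already clear, calls
 *    xprt_end_transmit(), and continues in call_transmit_status()
 *    instead of treating this as an unhandled connect result.
 */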
Signed-off-by: Trond Myklebust <[email protected]>
---
net/sunrpc/clnt.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index ae3b8145da35..e35d642558e7 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1915,6 +1915,13 @@ call_connect_status(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
+ /* Check if the task was already transmitted */
+ if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+ xprt_end_transmit(task);
+ task->tk_action = call_transmit_status;
+ return;
+ }
+
dprint_status(task);
trace_rpc_connect_status(task);
--
2.19.2
From: Chuck Lever <[email protected]>
call_encode can be invoked more than once per RPC call. Ensure that
each call to gss_wrap_req_priv does not overwrite pointers to
previously allocated memory.
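
A rough sketch of the scenario (an assumption pieced together from the
description above, not the actual call chain):

/*
 * First pass:
 *   call_encode()
 *     -> gss_wrap_req_priv()
 *          -> alloc_enc_pages()       allocates rq_enc_pages[]
 *        rq_snd_buf.pages is pointed at rq_enc_pages, and
 *        priv_release_snd_buf() is registered as rq_release_snd_buf
 *        so the pages are freed when the request is released.
 *
 * Second pass (the request has to be encoded again):
 *   call_encode()
 *     -> gss_wrap_req_priv()
 *          -> alloc_enc_pages()       previously overwrote rq_enc_pages,
 *                                     leaking the first array; with this
 *                                     patch it calls rq_release_snd_buf()
 *                                     first, which frees the old pages
 *                                     and clears the callback.
 */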
Signed-off-by: Chuck Lever <[email protected]>
Cc: [email protected]
Signed-off-by: Trond Myklebust <[email protected]>
---
net/sunrpc/auth_gss/auth_gss.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5d3f252659f1..ba765473d1f0 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1791,6 +1791,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
for (i=0; i < rqstp->rq_enc_pages_num; i++)
__free_page(rqstp->rq_enc_pages[i]);
kfree(rqstp->rq_enc_pages);
+ rqstp->rq_release_snd_buf = NULL;
}
static int
@@ -1799,6 +1800,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
int first, last, i;
+ if (rqstp->rq_release_snd_buf)
+ rqstp->rq_release_snd_buf(rqstp);
+
if (snd_buf->page_len == 0) {
rqstp->rq_enc_pages_num = 0;
return 0;
--
2.19.2
A call to gss_wrap_req_priv() will end up replacing the original array
in rqstp->rq_snd_buf.pages with a new one containing the encrypted
data. In order to avoid having the rqstp->rq_snd_buf.bvec point to the
wrong page data, we need to refresh that too.
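
The relevant detail (paraphrased from net/sunrpc/xdr.c around v4.20; the
exact source may differ) is that xdr_alloc_bvec() only builds a bio_vec
array while buf->bvec is still NULL, and each entry simply points at the
corresponding entry of buf->pages[]:

/* Paraphrase of xdr_alloc_bvec(), not the exact source: */
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
{
	size_t i, n = xdr_buf_pagecount(buf);

	if (n != 0 && buf->bvec == NULL) {
		buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
		if (!buf->bvec)
			return -ENOMEM;
		for (i = 0; i < n; i++) {
			buf->bvec[i].bv_page = buf->pages[i];
			buf->bvec[i].bv_len = PAGE_SIZE;
			buf->bvec[i].bv_offset = 0;
		}
	}
	return 0;
}

So if buf->pages[] is replaced after the bvec has been built (as
gss_wrap_req_priv() does for the send buffer), the existing bvec keeps
pointing at the old pages, and a later xdr_alloc_bvec() call alone will
not rebuild it; hence the explicit xdr_free_bvec() before the
re-allocation in xs_stream_prepare_request(). Clearing buf->bvec in
xdr_buf_init() would also orphan an already-allocated array, which is
why that initialisation moves into xprt_request_init() instead.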
Signed-off-by: Trond Myklebust <[email protected]>
---
include/linux/sunrpc/xdr.h | 1 -
net/sunrpc/xprt.c | 2 ++
net/sunrpc/xprtsock.c | 1 +
3 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 43106ffa6788..2ec128060239 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
- buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 86bea4520c4d..122c91c28e7c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1623,6 +1623,8 @@ xprt_request_init(struct rpc_task *task)
req->rq_snd_buf.buflen = 0;
req->rq_rcv_buf.len = 0;
req->rq_rcv_buf.buflen = 0;
+ req->rq_snd_buf.bvec = NULL;
+ req->rq_rcv_buf.bvec = NULL;
req->rq_release_snd_buf = NULL;
xprt_reset_majortimeo(req);
dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ae77c71c1f64..615ef2397fc5 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -843,6 +843,7 @@ static int xs_nospace(struct rpc_rqst *req)
static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
+ xdr_free_bvec(&req->rq_rcv_buf);
req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
}
--
2.19.2
> On Nov 30, 2018, at 4:55 PM, Trond Myklebust <[email protected]> wrote:
>
> A call to gss_wrap_req_priv() will end up replacing the original array
> in rqstp->rq_snd_buf.pages with a new one containing the encrypted
> data. In order to avoid having the rqstp->rq_snd_buf.bvec point to the
> wrong page data, we need to refresh that too.
I would add a note here that this patch fixes a memory leak. And
you might want to add a Fixes: tag.
I'm trying it out now.
> Signed-off-by: Trond Myklebust <[email protected]>
> ---
> include/linux/sunrpc/xdr.h | 1 -
> net/sunrpc/xprt.c | 2 ++
> net/sunrpc/xprtsock.c | 1 +
> 3 files changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
> index 43106ffa6788..2ec128060239 100644
> --- a/include/linux/sunrpc/xdr.h
> +++ b/include/linux/sunrpc/xdr.h
> @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
> buf->head[0].iov_base = start;
> buf->head[0].iov_len = len;
> buf->tail[0].iov_len = 0;
> - buf->bvec = NULL;
> buf->pages = NULL;
> buf->page_len = 0;
> buf->flags = 0;
> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> index 86bea4520c4d..122c91c28e7c 100644
> --- a/net/sunrpc/xprt.c
> +++ b/net/sunrpc/xprt.c
> @@ -1623,6 +1623,8 @@ xprt_request_init(struct rpc_task *task)
> req->rq_snd_buf.buflen = 0;
> req->rq_rcv_buf.len = 0;
> req->rq_rcv_buf.buflen = 0;
> + req->rq_snd_buf.bvec = NULL;
> + req->rq_rcv_buf.bvec = NULL;
> req->rq_release_snd_buf = NULL;
> xprt_reset_majortimeo(req);
> dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
> diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
> index ae77c71c1f64..615ef2397fc5 100644
> --- a/net/sunrpc/xprtsock.c
> +++ b/net/sunrpc/xprtsock.c
> @@ -843,6 +843,7 @@ static int xs_nospace(struct rpc_rqst *req)
> static void
> xs_stream_prepare_request(struct rpc_rqst *req)
> {
> + xdr_free_bvec(&req->rq_rcv_buf);
> req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
> }
>
> --
> 2.19.2
>
--
Chuck Lever
On Fri, 2018-11-30 at 17:15 -0500, Chuck Lever wrote:
> > On Nov 30, 2018, at 4:55 PM, Trond Myklebust <[email protected]>
> > wrote:
> >
> > A call to gss_wrap_req_priv() will end up replacing the original
> > array
> > in rqstp->rq_snd_buf.pages with a new one containing the encrypted
> > data. In order to avoid having the rqstp->rq_snd_buf.bvec point to
> > the
> > wrong page data, we need to refresh that too.
>
> I would add a note here that this patch fixes a memory leak. And
> you might want to add a Fixes: tag.
>
It only applies to new code that went into 4.20, so it won't need any
stable backporting.
That said, I'm realising (slowly - apparently I'm asleep today) that
this is receive side code, so
a) The contents of rq_snd_buf are irrelevant.
b) We want to beware of changing it while there is a copy in
rq_private_buf.
IOW: a v4 is forthcoming.
> I'm trying it out now.
>
>
> > Signed-off-by: Trond Myklebust <[email protected]>
> > ---
> > include/linux/sunrpc/xdr.h | 1 -
> > net/sunrpc/xprt.c | 2 ++
> > net/sunrpc/xprtsock.c | 1 +
> > 3 files changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/include/linux/sunrpc/xdr.h
> > b/include/linux/sunrpc/xdr.h
> > index 43106ffa6788..2ec128060239 100644
> > --- a/include/linux/sunrpc/xdr.h
> > +++ b/include/linux/sunrpc/xdr.h
> > @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start,
> > size_t len)
> > buf->head[0].iov_base = start;
> > buf->head[0].iov_len = len;
> > buf->tail[0].iov_len = 0;
> > - buf->bvec = NULL;
> > buf->pages = NULL;
> > buf->page_len = 0;
> > buf->flags = 0;
> > diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> > index 86bea4520c4d..122c91c28e7c 100644
> > --- a/net/sunrpc/xprt.c
> > +++ b/net/sunrpc/xprt.c
> > @@ -1623,6 +1623,8 @@ xprt_request_init(struct rpc_task *task)
> > req->rq_snd_buf.buflen = 0;
> > req->rq_rcv_buf.len = 0;
> > req->rq_rcv_buf.buflen = 0;
> > + req->rq_snd_buf.bvec = NULL;
> > + req->rq_rcv_buf.bvec = NULL;
> > req->rq_release_snd_buf = NULL;
> > xprt_reset_majortimeo(req);
> > dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
> > diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
> > index ae77c71c1f64..615ef2397fc5 100644
> > --- a/net/sunrpc/xprtsock.c
> > +++ b/net/sunrpc/xprtsock.c
> > @@ -843,6 +843,7 @@ static int xs_nospace(struct rpc_rqst *req)
> > static void
> > xs_stream_prepare_request(struct rpc_rqst *req)
> > {
> > + xdr_free_bvec(&req->rq_rcv_buf);
> > req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf,
> > GFP_NOIO);
> > }
> >
> > --
> > 2.19.2
> >
>
> --
> Chuck Lever
>
>
>
--
Trond Myklebust
Linux NFS client maintainer, Hammerspace
[email protected]
> On Nov 30, 2018, at 5:18 PM, Trond Myklebust <[email protected]> wrote:
>
> On Fri, 2018-11-30 at 17:15 -0500, Chuck Lever wrote:
>>> On Nov 30, 2018, at 4:55 PM, Trond Myklebust <[email protected]>
>>> wrote:
>>>
>>> A call to gss_wrap_req_priv() will end up replacing the original
>>> array
>>> in rqstp->rq_snd_buf.pages with a new one containing the encrypted
>>> data. In order to avoid having the rqstp->rq_snd_buf.bvec point to
>>> the
>>> wrong page data, we need to refresh that too.
>>
>> I would add a note here that this patch fixes a memory leak. And
>> you might want to add a Fixes: tag.
>>
>
> It only applies to new code that went into 4.20, so it won't need any
> stable backporting.
>
> That said, I'm realising (slowly - apparently I'm asleep today) that
> this is receive side code, so
>
> a) The contents of rq_snd_buf are irrelevant.
> b) We want to beware of changing it while there is a copy in
> rq_private_buf.
>
> IOW: a v4 is forthcoming.
fwiw, i see the soft IRQ warnings with NFS/RDMA too.
I would like to turn those into a pr_warn_rate_limited. I don't
see much point in the backtrace blather.
>> I'm trying it out now.
>>
>>
>>> Signed-off-by: Trond Myklebust <[email protected]>
>>> ---
>>> include/linux/sunrpc/xdr.h | 1 -
>>> net/sunrpc/xprt.c | 2 ++
>>> net/sunrpc/xprtsock.c | 1 +
>>> 3 files changed, 3 insertions(+), 1 deletion(-)
>>>
>>> diff --git a/include/linux/sunrpc/xdr.h
>>> b/include/linux/sunrpc/xdr.h
>>> index 43106ffa6788..2ec128060239 100644
>>> --- a/include/linux/sunrpc/xdr.h
>>> +++ b/include/linux/sunrpc/xdr.h
>>> @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start,
>>> size_t len)
>>> buf->head[0].iov_base = start;
>>> buf->head[0].iov_len = len;
>>> buf->tail[0].iov_len = 0;
>>> - buf->bvec = NULL;
>>> buf->pages = NULL;
>>> buf->page_len = 0;
>>> buf->flags = 0;
>>> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
>>> index 86bea4520c4d..122c91c28e7c 100644
>>> --- a/net/sunrpc/xprt.c
>>> +++ b/net/sunrpc/xprt.c
>>> @@ -1623,6 +1623,8 @@ xprt_request_init(struct rpc_task *task)
>>> req->rq_snd_buf.buflen = 0;
>>> req->rq_rcv_buf.len = 0;
>>> req->rq_rcv_buf.buflen = 0;
>>> + req->rq_snd_buf.bvec = NULL;
>>> + req->rq_rcv_buf.bvec = NULL;
>>> req->rq_release_snd_buf = NULL;
>>> xprt_reset_majortimeo(req);
>>> dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
>>> diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
>>> index ae77c71c1f64..615ef2397fc5 100644
>>> --- a/net/sunrpc/xprtsock.c
>>> +++ b/net/sunrpc/xprtsock.c
>>> @@ -843,6 +843,7 @@ static int xs_nospace(struct rpc_rqst *req)
>>> static void
>>> xs_stream_prepare_request(struct rpc_rqst *req)
>>> {
>>> + xdr_free_bvec(&req->rq_rcv_buf);
>>> req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf,
>>> GFP_NOIO);
>>> }
>>>
>>> --
>>> 2.19.2
>>>
>>
>> --
>> Chuck Lever
>>
>>
>>
> --
> Trond Myklebust
> Linux NFS client maintainer, Hammerspace
> [email protected]
--
Chuck Lever
On Fri, 2018-11-30 at 17:23 -0500, Chuck Lever wrote:
> > On Nov 30, 2018, at 5:18 PM, Trond Myklebust <
> > [email protected]> wrote:
> >
> > On Fri, 2018-11-30 at 17:15 -0500, Chuck Lever wrote:
> > > > On Nov 30, 2018, at 4:55 PM, Trond Myklebust <[email protected]
> > > > >
> > > > wrote:
> > > >
> > > > A call to gss_wrap_req_priv() will end up replacing the
> > > > original
> > > > array
> > > > in rqstp->rq_snd_buf.pages with a new one containing the
> > > > encrypted
> > > > data. In order to avoid having the rqstp->rq_snd_buf.bvec point
> > > > to
> > > > the
> > > > wrong page data, we need to refresh that too.
> > >
> > > I would add a note here that this patch fixes a memory leak. And
> > > you might want to add a Fixes: tag.
> > >
> >
> > It only applies to new code that went into 4.20, so it won't need
> > any
> > stable backporting.
> >
> > That said, I'm realising (slowly - apparently I'm asleep today)
> > that
> > this is receive side code, so
> >
> > a) The contents of rq_snd_buf are irrelevant.
> > b) We want to beware of changing it while there is a copy in
> > rq_private_buf.
> >
> > IOW: a v4 is forthcoming.
>
> fwiw, i see the soft IRQ warnings with NFS/RDMA too.
>
> I would like to turn those into a pr_warn_rate_limited. I don't
> see much point in the backtrace blather.
>
Your initial email mentioned these soft IRQ warnings, but didn't
provide an example. Which warnings are these exactly, and where are
they coming from?
--
Trond Myklebust
Linux NFS client maintainer, Hammerspace
[email protected]
> On Nov 30, 2018, at 5:30 PM, Trond Myklebust <[email protected]> wrote:
>
> On Fri, 2018-11-30 at 17:23 -0500, Chuck Lever wrote:
>>> On Nov 30, 2018, at 5:18 PM, Trond Myklebust <
>>> [email protected]> wrote:
>>>
>>> On Fri, 2018-11-30 at 17:15 -0500, Chuck Lever wrote:
>>>>> On Nov 30, 2018, at 4:55 PM, Trond Myklebust <[email protected]
>>>>>>
>>>>> wrote:
>>>>>
>>>>> A call to gss_wrap_req_priv() will end up replacing the
>>>>> original
>>>>> array
>>>>> in rqstp->rq_snd_buf.pages with a new one containing the
>>>>> encrypted
>>>>> data. In order to avoid having the rqstp->rq_snd_buf.bvec point
>>>>> to
>>>>> the
>>>>> wrong page data, we need to refresh that too.
>>>>
>>>> I would add a note here that this patch fixes a memory leak. And
>>>> you might want to add a Fixes: tag.
>>>>
>>>
>>> It only applies to new code that went into 4.20, so it won't need
>>> any
>>> stable backporting.
>>>
>>> That said, I'm realising (slowly - apparently I'm asleep today)
>>> that
>>> this is receive side code, so
>>>
>>> a) The contents of rq_snd_buf are irrelevant.
>>> b) We want to beware of changing it while there is a copy in
>>> rq_private_buf.
>>>
>>> IOW: a v4 is forthcoming.
>>
>> fwiw, i see the soft IRQ warnings with NFS/RDMA too.
>>
>> I would like to turn those into a pr_warn_rate_limited. I don't
>> see much point in the backtrace blather.
>>
>
> Your initial email mentioned these soft IRQ warnings, but didn't
> provide an example. Which warnings are these exactly, and where are
> they coming from?
The WARN_ON in call_decode that fires when rq_rcv_buf does not
exactly match rq_private_buf.
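
For reference, these are roughly the two code sites involved (paraphrased
from v4.20-era sources; exact lines may differ):

/* net/sunrpc/xprt.c, when the request is queued for a reply: */
	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
	       sizeof(req->rq_private_buf));

/* net/sunrpc/clnt.c, call_decode(): */
	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
		       sizeof(req->rq_rcv_buf)) != 0);

Since rq_private_buf is a byte-for-byte snapshot of rq_rcv_buf taken at
queue time, any later change to a field of rq_rcv_buf (including the
bvec pointer) that is not mirrored into rq_private_buf will trip this
warning.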
--
Chuck Lever
On Fri, 2018-11-30 at 17:31 -0500, Chuck Lever wrote:
> > On Nov 30, 2018, at 5:30 PM, Trond Myklebust <
> > [email protected]> wrote:
> >
> > On Fri, 2018-11-30 at 17:23 -0500, Chuck Lever wrote:
> > > > On Nov 30, 2018, at 5:18 PM, Trond Myklebust <
> > > > [email protected]> wrote:
> > > >
> > > > On Fri, 2018-11-30 at 17:15 -0500, Chuck Lever wrote:
> > > > > > On Nov 30, 2018, at 4:55 PM, Trond Myklebust <
> > > > > > [email protected]
> > > > > > wrote:
> > > > > >
> > > > > > A call to gss_wrap_req_priv() will end up replacing the
> > > > > > original
> > > > > > array
> > > > > > in rqstp->rq_snd_buf.pages with a new one containing the
> > > > > > encrypted
> > > > > > data. In order to avoid having the rqstp->rq_snd_buf.bvec
> > > > > > point
> > > > > > to
> > > > > > the
> > > > > > wrong page data, we need to refresh that too.
> > > > >
> > > > > I would add a note here that this patch fixes a memory leak.
> > > > > And
> > > > > you might want to add a Fixes: tag.
> > > > >
> > > >
> > > > It only applies to new code that went into 4.20, so it won't
> > > > need
> > > > any
> > > > stable backporting.
> > > >
> > > > That said, I'm realising (slowly - apparently I'm asleep today)
> > > > that
> > > > this is receive side code, so
> > > >
> > > > a) The contents of rq_snd_buf are irrelevant.
> > > > b) We want to beware of changing it while there is a copy in
> > > > rq_private_buf.
> > > >
> > > > IOW: a v4 is forthcoming.
> > >
> > > fwiw, i see the soft IRQ warnings with NFS/RDMA too.
> > >
> > > I would like to turn those into a pr_warn_rate_limited. I don't
> > > see much point in the backtrace blather.
> > >
> >
> > Your initial email mentioned these soft IRQ warnings, but didn't
> > provide an example. Which warnings are these exactly, and where are
> > they coming from?
>
> The WARN_ON in call_decode that fires when rq_rcv_buf does not
> exactly match rq_private_buf.
>
Oh that warning... That might actually be triggering on the bvec value
clobber.
--
Trond Myklebust
Linux NFS client maintainer, Hammerspace
[email protected]