Because we clear XPRT_SOCK_DATA_READY before reading, we can end up
with a situation where new data arrives, causing xs_data_ready() to
queue up a second receive worker job for the same socket, which then
immediately gets stuck waiting on the transport receive mutex.
The fix is to only clear XPRT_SOCK_DATA_READY once we're done reading,
and then to use poll() to check if we might need to queue up a new
job in order to deal with any new data.
Signed-off-by: Trond Myklebust <[email protected]>
---
net/sunrpc/xprtsock.c | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index f5d7dcd9e8d9..a721c843d5d3 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -656,13 +656,25 @@ xs_read_stream(struct sock_xprt *transport, int flags)
return ret != 0 ? ret : -ESHUTDOWN;
}

+static void xs_poll_check_readable(struct sock_xprt *transport)
+{
+ struct socket *sock = transport->sock;
+ __poll_t events;
+
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+ events = sock->ops->poll(NULL, sock, NULL);
+ if (!(events & (EPOLLIN | EPOLLRDNORM)) || events & EPOLLRDHUP)
+ return;
+ if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
+ queue_work(xprtiod_workqueue, &transport->recv_worker);
+}
+
static void xs_stream_data_receive(struct sock_xprt *transport)
{
size_t read = 0;
ssize_t ret = 0;

mutex_lock(&transport->recv_mutex);
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
if (transport->sock == NULL)
goto out;
for (;;) {
@@ -672,6 +684,7 @@ static void xs_stream_data_receive(struct sock_xprt *transport)
read += ret;
cond_resched();
}
+ xs_poll_check_readable(transport);
out:
mutex_unlock(&transport->recv_mutex);
trace_xs_stream_read_data(&transport->xprt, ret, read);
@@ -1362,7 +1375,6 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
int err;

mutex_lock(&transport->recv_mutex);
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
sk = transport->inet;
if (sk == NULL)
goto out;
@@ -1374,6 +1386,7 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
consume_skb(skb);
cond_resched();
}
+ xs_poll_check_readable(transport);
out:
mutex_unlock(&transport->recv_mutex);
}
--
2.20.1
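For context on why the new ordering is sufficient: the data_ready callback
queues the receive worker only when XPRT_SOCK_DATA_READY transitions from
clear to set, so leaving the bit set for the duration of the read loop is
what suppresses the duplicate jobs described above. A condensed sketch of
that callback side (not part of this patch; it elides the sk_callback_lock
handling and the call to the saved old_data_ready handler that the real
net/sunrpc/xprtsock.c performs):

static void xs_data_ready(struct sock *sk)
{
	struct rpc_xprt *xprt = xprt_from_sock(sk);

	if (xprt != NULL) {
		struct sock_xprt *transport =
			container_of(xprt, struct sock_xprt, xprt);

		/* Queue the worker only on a clear -> set transition.
		 * With this patch the bit stays set while the worker
		 * drains the socket, so callbacks that fire mid-read
		 * no longer queue a second job that would just block
		 * on recv_mutex. */
		if (!test_and_set_bit(XPRT_SOCK_DATA_READY,
				      &transport->sock_state))
			queue_work(xprtiod_workqueue,
				   &transport->recv_worker);
	}
}

The poll() in xs_poll_check_readable() then covers the one window this
leaves open: data that arrived while the bit was still set, for which no
new job was ever queued.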
Hi Trond-
> On Feb 19, 2019, at 9:06 AM, Trond Myklebust <[email protected]> wrote:
>
> Because we clear XPRT_SOCK_DATA_READY before reading, we can end up
> with a situation where new data arrives, causing xs_data_ready() to
> queue up a second receive worker job for the same socket, which then
> immediately gets stuck waiting on the transport receive mutex.
> The fix is to only clear XPRT_SOCK_DATA_READY once we're done reading,
> and then to use poll() to check if we might need to queue up a new
> job in order to deal with any new data.
Does this fix an application-visible hang, or is it merely a performance
optimization?
--
Chuck Lever
On Tue, 2019-02-19 at 09:54 -0500, Chuck Lever wrote:
> Hi Trond-
>
> Does this fix an application-visible hang, or is it merely a
> performance optimization?
I'm not aware of any hang associated with this behaviour. The patch is
rather intended as an optimisation to avoid having these threads block
uselessly on a mutex.
--
Trond Myklebust
Linux NFS client maintainer, Hammerspace
[email protected]
> On Feb 19, 2019, at 10:13 AM, Trond Myklebust <[email protected]> wrote:
>
> On Tue, 2019-02-19 at 09:54 -0500, Chuck Lever wrote:
>>
>> Does this fix an application-visible hang, or is it merely a
>> performance optimization?
>
> I'm not aware of any hang associated with this behaviour. The patch is
> rather intended as an optimisation to avoid having these threads block
> uselessly on a mutex.
That was my guess, thanks, just wanted to make certain.
--
Chuck Lever