From: Trond Myklebust <[email protected]>

Use a counter to keep track of how many requests are queued behind the
xprt->xpt_mutex, and keep TCP_CORK set until the queue is empty.

Signed-off-by: Trond Myklebust <[email protected]>
---
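[ Illustration only, not part of the patch: a rough user-space sketch of
  the corking scheme described in the changelog, assuming a connected TCP
  socket shared by several sender threads.  The helper names and thread
  model here are made up; the kernel code uses tcp_sock_set_cork(),
  atomic_inc()/atomic_dec_and_test() and the xprt->xpt_mutex instead. ]

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <pthread.h>
#include <stdatomic.h>
#include <sys/socket.h>
#include <unistd.h>

static pthread_mutex_t send_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int sendqlen;             /* senders queued behind send_lock */

static void set_cork(int fd, int on)
{
        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
}

static ssize_t send_record(int fd, const void *buf, size_t len)
{
        ssize_t ret;

        atomic_fetch_add(&sendqlen, 1);
        pthread_mutex_lock(&send_lock);
        set_cork(fd, 1);                /* hold back partial segments */
        ret = write(fd, buf, len);      /* may be one of several sub-writes */
        if (atomic_fetch_sub(&sendqlen, 1) == 1)
                set_cork(fd, 0);        /* queue drained: push out the rest */
        pthread_mutex_unlock(&send_lock);
        return ret;
}

[ While the cork is held, even a socket with TCP_NODELAY set (as
  svc_tcp_init() now arranges via tcp_sock_set_nodelay()) does not push
  partial frames onto the wire; dropping the cork flushes whatever is
  still pending. ]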
include/linux/sunrpc/svcsock.h | 2 ++
net/sunrpc/svcsock.c | 8 +++++++-
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index b7ac7fe68306..bcc555c7ae9c 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -35,6 +35,8 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
+ /* Number of queued send requests */
+ atomic_t sk_sendqlen;
struct page * sk_pages[RPCSVC_MAXPAGES]; /* received data */
};
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5a809c64dc7b..231f510a4830 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1171,18 +1171,23 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
svc_tcp_release_rqst(rqstp);
+ atomic_inc(&svsk->sk_sendqlen);
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
+ tcp_sock_set_cork(svsk->sk_sk, true);
err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
xdr_free_bvec(xdr);
trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
if (err < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
+ if (atomic_dec_and_test(&svsk->sk_sendqlen))
+ tcp_sock_set_cork(svsk->sk_sk, false);
mutex_unlock(&xprt->xpt_mutex);
return sent;
out_notconn:
+ atomic_dec(&svsk->sk_sendqlen);
mutex_unlock(&xprt->xpt_mutex);
return -ENOTCONN;
out_close:
@@ -1192,6 +1197,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
(err < 0) ? err : sent, xdr->len);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
svc_xprt_enqueue(xprt);
+ atomic_dec(&svsk->sk_sendqlen);
mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
}
@@ -1261,7 +1267,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
svsk->sk_datalen = 0;
memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
- tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
+ tcp_sock_set_nodelay(sk);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
switch (sk->sk_state) {
--
2.29.2

From: Trond Myklebust <[email protected]>

Now that the caller controls the TCP_CORK socket option, it is redundant
to set MSG_MORE and MSG_SENDPAGE_NOTLAST in the calls to
kernel_sendpage().

Signed-off-by: Trond Myklebust <[email protected]>
---
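[ Illustration only, not part of the patch: a user-space analogue of why
  the per-call flags become unnecessary once the caller corks the socket.
  Function and buffer names are made up; the real code path is
  svc_tcp_sendmsg() driving kernel_sendmsg() and kernel_sendpage() under
  the cork taken in svc_tcp_sendto().  MSG_SENDPAGE_NOTLAST is
  kernel-internal and has no user-space equivalent. ]

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>

static void set_cork(int fd, int on)
{
        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
}

/* Send an RPC-style record as three separate writes. */
static int send_reply(int fd, const void *hdr, size_t hlen,
                      const void *body, size_t blen)
{
        /* Record marker: last-fragment bit plus fragment length. */
        uint32_t marker = htonl(0x80000000 | (uint32_t)(hlen + blen));

        set_cork(fd, 1);
        /* No MSG_MORE on any call: the cork already coalesces them. */
        if (send(fd, &marker, sizeof(marker), 0) < 0 ||
            send(fd, hdr, hlen, 0) < 0 ||
            send(fd, body, blen, 0) < 0)
                return -1;
        set_cork(fd, 0);        /* uncork: the whole record goes out together */
        return 0;
}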
net/sunrpc/svcsock.c | 12 +++---------
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 231f510a4830..8c19732e425d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1088,12 +1088,11 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
.iov_base = &marker,
.iov_len = sizeof(marker),
};
- int flags, ret;
+ int ret;
*sentp = 0;
xdr_alloc_bvec(xdr, GFP_KERNEL);
- msg->msg_flags = MSG_MORE;
ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
if (ret < 0)
return ret;
@@ -1101,8 +1100,7 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
if (ret != rm.iov_len)
return -EAGAIN;
- flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
- ret = svc_tcp_send_kvec(sock, head, flags);
+ ret = svc_tcp_send_kvec(sock, head, 0);
if (ret < 0)
return ret;
*sentp += ret;
@@ -1116,15 +1114,11 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
offset = offset_in_page(xdr->page_base);
remaining = xdr->page_len;
- flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
while (remaining > 0) {
- if (remaining <= PAGE_SIZE && tail->iov_len == 0)
- flags = 0;
-
len = min(remaining, bvec->bv_len - offset);
ret = kernel_sendpage(sock, bvec->bv_page,
bvec->bv_offset + offset,
- len, flags);
+ len, 0);
if (ret < 0)
return ret;
*sentp += ret;
--
2.29.2