2018-03-11 15:27:37

by Chuck Lever

Subject: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

alloc_slot is a transport-specific op, but initializing an rpc_rqst
is common to all transports. In addition, the only part of initial-
izing an rpc_rqst that needs serialization is getting a fresh XID.

Move rpc_rqst initialization to common code in preparation for
adding a transport-specific alloc_slot to xprtrdma.

Signed-off-by: Chuck Lever <[email protected]>
---
include/linux/sunrpc/xprt.h | 1 +
net/sunrpc/clnt.c | 1 +
net/sunrpc/xprt.c | 12 +++++++-----
3 files changed, 9 insertions(+), 5 deletions(-)

Changes since v1:
- Partial sends should not bump the XID

diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 5fea0fb..9784e28 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -324,6 +324,7 @@ struct xprt_class {
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task);
void xprt_reserve(struct rpc_task *task);
+void xprt_request_init(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 6e432ec..226f558 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
+ xprt_request_init(task);
task->tk_action = call_refresh;
return;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 70f0050..2d95926 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -66,7 +66,7 @@
* Local functions
*/
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
-static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
+static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
static void xprt_connect_status(struct rpc_task *task);
static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
@@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
task->tk_status = -EAGAIN;
goto out_unlock;
}
+ if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
+ req->rq_xid = xprt_alloc_xid(xprt);
ret = true;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
@@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
out_init_req:
xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
xprt->num_reqs);
+ spin_unlock(&xprt->reserve_lock);
+
task->tk_status = 0;
task->tk_rqstp = req;
- xprt_request_init(task, xprt);
- spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

@@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
xprt->xid = prandom_u32();
}

-static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
+void xprt_request_init(struct rpc_task *task)
{
+ struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_rqst *req = task->tk_rqstp;

INIT_LIST_HEAD(&req->rq_list);
@@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_buffer = NULL;
- req->rq_xid = xprt_alloc_xid(xprt);
req->rq_connect_cookie = xprt->connect_cookie - 1;
req->rq_bytes_sent = 0;
req->rq_snd_buf.len = 0;



2018-03-12 00:41:26

by Tom Talpey

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);

Why is the type a be32? The XID is opaque, tested only for equality, and
no byte order is defined by the RPC protocol.

Methinks it should be u32.

Tom.
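
Restated in code, Tom's point is that the client only ever tests XIDs for equality, and equality on the raw 32-bit pattern is byte-order independent. A minimal illustration (not from the patch; the helper name is made up):

#include <linux/types.h>

/* Comparing the raw bit patterns needs no ntohl()/htonl(); a plain
 * u32 would serve the lookup just as well as __be32 does. */
static inline bool xid_matches(__be32 stored, __be32 incoming)
{
	return stored == incoming;
}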

On 3/11/2018 8:27 AM, Chuck Lever wrote:
> alloc_slot is a transport-specific op, but initializing an rpc_rqst
> is common to all transports. In addition, the only part of initial-
> izing an rpc_rqst that needs serialization is getting a fresh XID.
>
> Move rpc_rqst initialization to common code in preparation for
> adding a transport-specific alloc_slot to xprtrdma.
>
> Signed-off-by: Chuck Lever <[email protected]>
> ---
> include/linux/sunrpc/xprt.h | 1 +
> net/sunrpc/clnt.c | 1 +
> net/sunrpc/xprt.c | 12 +++++++-----
> 3 files changed, 9 insertions(+), 5 deletions(-)
>
> Changes since v1:
> - Partial sends should not bump the XID
>
> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
> index 5fea0fb..9784e28 100644
> --- a/include/linux/sunrpc/xprt.h
> +++ b/include/linux/sunrpc/xprt.h
> @@ -324,6 +324,7 @@ struct xprt_class {
> struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
> void xprt_connect(struct rpc_task *task);
> void xprt_reserve(struct rpc_task *task);
> +void xprt_request_init(struct rpc_task *task);
> void xprt_retry_reserve(struct rpc_task *task);
> int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
> int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> index 6e432ec..226f558 100644
> --- a/net/sunrpc/clnt.c
> +++ b/net/sunrpc/clnt.c
> @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
> task->tk_status = 0;
> if (status >= 0) {
> if (task->tk_rqstp) {
> + xprt_request_init(task);
> task->tk_action = call_refresh;
> return;
> }
> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> index 70f0050..2d95926 100644
> --- a/net/sunrpc/xprt.c
> +++ b/net/sunrpc/xprt.c
> @@ -66,7 +66,7 @@
> * Local functions
> */
> static void xprt_init(struct rpc_xprt *xprt, struct net *net);
> -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
> static void xprt_connect_status(struct rpc_task *task);
> static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
> static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
> @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
> task->tk_status = -EAGAIN;
> goto out_unlock;
> }
> + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
> + req->rq_xid = xprt_alloc_xid(xprt);
> ret = true;
> out_unlock:
> spin_unlock_bh(&xprt->transport_lock);
> @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
> out_init_req:
> xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
> xprt->num_reqs);
> + spin_unlock(&xprt->reserve_lock);
> +
> task->tk_status = 0;
> task->tk_rqstp = req;
> - xprt_request_init(task, xprt);
> - spin_unlock(&xprt->reserve_lock);
> }
> EXPORT_SYMBOL_GPL(xprt_alloc_slot);
>
> @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
> xprt->xid = prandom_u32();
> }
>
> -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> +void xprt_request_init(struct rpc_task *task)
> {
> + struct rpc_xprt *xprt = task->tk_xprt;
> struct rpc_rqst *req = task->tk_rqstp;
>
> INIT_LIST_HEAD(&req->rq_list);
> @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> req->rq_task = task;
> req->rq_xprt = xprt;
> req->rq_buffer = NULL;
> - req->rq_xid = xprt_alloc_xid(xprt);
> req->rq_connect_cookie = xprt->connect_cookie - 1;
> req->rq_bytes_sent = 0;
> req->rq_snd_buf.len = 0;
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
>

2018-03-12 10:57:01

by Jeff Layton

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

It is opaque. But, it's helpful to treat it as if it has endianness, if
only to make logfiles, traces and such look consistent when (e.g.) the
client and server are of different endianness.

-- Jeff
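
A small sketch of what Jeff means (the helper below is hypothetical, not an existing SUNRPC function): converting the stored __be32 to host order before printing means a little-endian client and a big-endian server report the same number for the same RPC.

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/byteorder.h>

/* Hypothetical trace helper: print the XID in host order so logs from
 * hosts of either endianness show the same value for a given request. */
static void log_rpc_xid(const char *who, __be32 xid)
{
	pr_info("%s: xid=0x%08x\n", who, be32_to_cpu(xid));
}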

On Sun, 2018-03-11 at 17:41 -0700, Tom Talpey wrote:
> > +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
>
> Why is the type a be32? The XID is opaque, tested only for equality, and
> no byte order is defined by the RPC protocol.
>
> Methinks it should be u32.
>
> Tom.
>
> On 3/11/2018 8:27 AM, Chuck Lever wrote:
> > alloc_slot is a transport-specific op, but initializing an rpc_rqst
> > is common to all transports. In addition, the only part of initial-
> > izing an rpc_rqst that needs serialization is getting a fresh XID.
> >
> > Move rpc_rqst initialization to common code in preparation for
> > adding a transport-specific alloc_slot to xprtrdma.
> >
> > Signed-off-by: Chuck Lever <[email protected]>
> > ---
> > include/linux/sunrpc/xprt.h | 1 +
> > net/sunrpc/clnt.c | 1 +
> > net/sunrpc/xprt.c | 12 +++++++-----
> > 3 files changed, 9 insertions(+), 5 deletions(-)
> >
> > Changes since v1:
> > - Partial sends should not bump the XID
> >
> > diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
> > index 5fea0fb..9784e28 100644
> > --- a/include/linux/sunrpc/xprt.h
> > +++ b/include/linux/sunrpc/xprt.h
> > @@ -324,6 +324,7 @@ struct xprt_class {
> > struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
> > void xprt_connect(struct rpc_task *task);
> > void xprt_reserve(struct rpc_task *task);
> > +void xprt_request_init(struct rpc_task *task);
> > void xprt_retry_reserve(struct rpc_task *task);
> > int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
> > int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
> > diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> > index 6e432ec..226f558 100644
> > --- a/net/sunrpc/clnt.c
> > +++ b/net/sunrpc/clnt.c
> > @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
> > task->tk_status = 0;
> > if (status >= 0) {
> > if (task->tk_rqstp) {
> > + xprt_request_init(task);
> > task->tk_action = call_refresh;
> > return;
> > }
> > diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> > index 70f0050..2d95926 100644
> > --- a/net/sunrpc/xprt.c
> > +++ b/net/sunrpc/xprt.c
> > @@ -66,7 +66,7 @@
> > * Local functions
> > */
> > static void xprt_init(struct rpc_xprt *xprt, struct net *net);
> > -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
> > +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
> > static void xprt_connect_status(struct rpc_task *task);
> > static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
> > static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
> > @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
> > task->tk_status = -EAGAIN;
> > goto out_unlock;
> > }
> > + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
> > + req->rq_xid = xprt_alloc_xid(xprt);
> > ret = true;
> > out_unlock:
> > spin_unlock_bh(&xprt->transport_lock);
> > @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
> > out_init_req:
> > xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
> > xprt->num_reqs);
> > + spin_unlock(&xprt->reserve_lock);
> > +
> > task->tk_status = 0;
> > task->tk_rqstp = req;
> > - xprt_request_init(task, xprt);
> > - spin_unlock(&xprt->reserve_lock);
> > }
> > EXPORT_SYMBOL_GPL(xprt_alloc_slot);
> >
> > @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
> > xprt->xid = prandom_u32();
> > }
> >
> > -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> > +void xprt_request_init(struct rpc_task *task)
> > {
> > + struct rpc_xprt *xprt = task->tk_xprt;
> > struct rpc_rqst *req = task->tk_rqstp;
> >
> > INIT_LIST_HEAD(&req->rq_list);
> > @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> > req->rq_task = task;
> > req->rq_xprt = xprt;
> > req->rq_buffer = NULL;
> > - req->rq_xid = xprt_alloc_xid(xprt);
> > req->rq_connect_cookie = xprt->connect_cookie - 1;
> > req->rq_bytes_sent = 0;
> > req->rq_snd_buf.len = 0;
> >
> > --
> > To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> > the body of a message to [email protected]
> > More majordomo info at http://vger.kernel.org/majordomo-info.html
> >
> >
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

--
Jeff Layton <[email protected]>

2018-03-12 13:07:15

by Trond Myklebust

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

On Mon, 2018-03-12 at 06:56 -0400, Jeff Layton wrote:
> It is opaque. But, it's helpful to treat it as if it has endianness, if
> only to make logfiles, traces and such look consistent when (e.g.) the
> client and server are of different endianness.

Agreed. The other thing to note is that all native XDR objects are
annotated as being big endian in order to allow 'sparse' and other
static type checkers to identify dodgy casts and failures to convert
the endianness.

So by labelling the XID as __be32, we're in effect telling those
checkers that we do not need to convert this object when XDR encoding
it.

Cheers
  Trond

--
Trond Myklebust
Linux NFS client maintainer, PrimaryData
[email protected]
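
A minimal sketch of the annotation discipline Trond describes (the struct and function below are illustrative, not SUNRPC code): with the field declared __be32, sparse (run via "make C=2") reports "incorrect type in assignment (different base types)" if a host-order value is stored without conversion, and stays quiet when cpu_to_be32() is used.

#include <linux/types.h>
#include <asm/byteorder.h>

struct wire_hdr {
	__be32 xid;	/* already in XDR (big-endian) byte order */
};

static void fill_hdr(struct wire_hdr *hdr, u32 next_xid)
{
	hdr->xid = cpu_to_be32(next_xid);	/* sparse-clean */
	/* hdr->xid = next_xid; would draw the sparse warning above */
}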


2018-03-12 14:30:27

by Chuck Lever

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

Hi Tom-

Thanks for taking a look!


> On Mar 11, 2018, at 8:41 PM, Tom Talpey <[email protected]> wrote:
>
> > +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
>
> Why is the type a be32? The XID is opaque, tested only for equality, and
> no byte order is defined by the RPC protocol.
>
> Methinks it should be u32.

Others have already commented, but I'll throw in my 2 pfennigs worth.

Here, I'm adding a forward declaration for xprt_alloc_xid. The
existing function definition already returns __be32. So the patch
isn't actually altering the function's return type. The forward
declaration added here reflects the existing function definition.

The Linux client places this opaque on the wire without XDR-encoding
it, because, as you observed, this data item is indeed opaque to the
RPC server. No encoding is needed since the remote system does not
interpret these bits.

Therefore what comes out of this function can be considered already
XDR-encoded. XDR is essentially big-endian, and __be32 is how we
denote "already XDR-encoded" data in the Linux kernel, to assist our
static checkers (ie, sparse).

Compare, for example, the constant NFS status values that are handled
by the NFS server. Rather than holding native values and XDR-encoding
them every time, these constants are stored pre-encoded as 32-bit
big-endian integers, and therefore their type is __be32.
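
The pattern being referred to looks roughly like this (a sketch; the real macros live in the nfsd headers and may differ in detail): the status codes are kept permanently in big-endian form so handlers can drop them straight into the XDR reply stream without a per-reply conversion.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Host-order protocol constants (normally from include/uapi/linux/nfs.h). */
#define NFS_OK		0
#define NFSERR_PERM	1

/* Pre-encoded once, so their type is __be32. */
#define nfs_ok		cpu_to_be32(NFS_OK)
#define nfserr_perm	cpu_to_be32(NFSERR_PERM)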


> Tom.
>
> On 3/11/2018 8:27 AM, Chuck Lever wrote:
>> alloc_slot is a transport-specific op, but initializing an rpc_rqst
>> is common to all transports. In addition, the only part of initial-
>> izing an rpc_rqst that needs serialization is getting a fresh XID.
>>
>> Move rpc_rqst initialization to common code in preparation for
>> adding a transport-specific alloc_slot to xprtrdma.
>>
>> Signed-off-by: Chuck Lever <[email protected]>
>> ---
>> include/linux/sunrpc/xprt.h | 1 +
>> net/sunrpc/clnt.c | 1 +
>> net/sunrpc/xprt.c | 12 +++++++-----
>> 3 files changed, 9 insertions(+), 5 deletions(-)
>>
>> Changes since v1:
>> - Partial sends should not bump the XID
>>
>> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
>> index 5fea0fb..9784e28 100644
>> --- a/include/linux/sunrpc/xprt.h
>> +++ b/include/linux/sunrpc/xprt.h
>> @@ -324,6 +324,7 @@ struct xprt_class {
>> struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
>> void xprt_connect(struct rpc_task *task);
>> void xprt_reserve(struct rpc_task *task);
>> +void xprt_request_init(struct rpc_task *task);
>> void xprt_retry_reserve(struct rpc_task *task);
>> int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
>> int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
>> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
>> index 6e432ec..226f558 100644
>> --- a/net/sunrpc/clnt.c
>> +++ b/net/sunrpc/clnt.c
>> @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
>> task->tk_status = 0;
>> if (status >= 0) {
>> if (task->tk_rqstp) {
>> + xprt_request_init(task);
>> task->tk_action = call_refresh;
>> return;
>> }
>> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
>> index 70f0050..2d95926 100644
>> --- a/net/sunrpc/xprt.c
>> +++ b/net/sunrpc/xprt.c
>> @@ -66,7 +66,7 @@
>> * Local functions
>> */
>> static void xprt_init(struct rpc_xprt *xprt, struct net *net);
>> -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
>> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
>> static void xprt_connect_status(struct rpc_task *task);
>> static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
>> static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
>> @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
>> task->tk_status = -EAGAIN;
>> goto out_unlock;
>> }
>> + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
>> + req->rq_xid = xprt_alloc_xid(xprt);
>> ret = true;
>> out_unlock:
>> spin_unlock_bh(&xprt->transport_lock);
>> @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
>> out_init_req:
>> xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
>> xprt->num_reqs);
>> + spin_unlock(&xprt->reserve_lock);
>> +
>> task->tk_status = 0;
>> task->tk_rqstp = req;
>> - xprt_request_init(task, xprt);
>> - spin_unlock(&xprt->reserve_lock);
>> }
>> EXPORT_SYMBOL_GPL(xprt_alloc_slot);
>>
>> @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
>> xprt->xid = prandom_u32();
>> }
>>
>> -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>> +void xprt_request_init(struct rpc_task *task)
>> {
>> + struct rpc_xprt *xprt = task->tk_xprt;
>> struct rpc_rqst *req = task->tk_rqstp;
>>
>> INIT_LIST_HEAD(&req->rq_list);
>> @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>> req->rq_task = task;
>> req->rq_xprt = xprt;
>> req->rq_buffer = NULL;
>> - req->rq_xid = xprt_alloc_xid(xprt);
>> req->rq_connect_cookie = xprt->connect_cookie - 1;
>> req->rq_bytes_sent = 0;
>> req->rq_snd_buf.len = 0;
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
>> the body of a message to [email protected]
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

--
Chuck Lever




2018-04-03 20:12:22

by Anna Schumaker

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock

Hi Chuck,

On 03/11/2018 11:27 AM, Chuck Lever wrote:
> alloc_slot is a transport-specific op, but initializing an rpc_rqst
> is common to all transports. In addition, the only part of initial-
> izing an rpc_rqst that needs serialization is getting a fresh XID.
>
> Move rpc_rqst initialization to common code in preparation for
> adding a transport-specific alloc_slot to xprtrdma.

I'm having trouble with this patch again. This time I'm running xfstests generic/074 with NFS v4.0, and my client starts seeing "nfs: server 192.168.100.215 not responding, still trying" after about a minute. Any thoughts about what's going on here? I keep trying to capture it with Wireshark, but there are enough packets on the wire to crash Wireshark.

Thoughts?
Anna

>
> Signed-off-by: Chuck Lever <[email protected]>
> ---
> include/linux/sunrpc/xprt.h | 1 +
> net/sunrpc/clnt.c | 1 +
> net/sunrpc/xprt.c | 12 +++++++-----
> 3 files changed, 9 insertions(+), 5 deletions(-)
>
> Changes since v1:
> - Partial sends should not bump the XID
>
> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
> index 5fea0fb..9784e28 100644
> --- a/include/linux/sunrpc/xprt.h
> +++ b/include/linux/sunrpc/xprt.h
> @@ -324,6 +324,7 @@ struct xprt_class {
> struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
> void xprt_connect(struct rpc_task *task);
> void xprt_reserve(struct rpc_task *task);
> +void xprt_request_init(struct rpc_task *task);
> void xprt_retry_reserve(struct rpc_task *task);
> int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
> int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> index 6e432ec..226f558 100644
> --- a/net/sunrpc/clnt.c
> +++ b/net/sunrpc/clnt.c
> @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
> task->tk_status = 0;
> if (status >= 0) {
> if (task->tk_rqstp) {
> + xprt_request_init(task);
> task->tk_action = call_refresh;
> return;
> }
> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> index 70f0050..2d95926 100644
> --- a/net/sunrpc/xprt.c
> +++ b/net/sunrpc/xprt.c
> @@ -66,7 +66,7 @@
> * Local functions
> */
> static void xprt_init(struct rpc_xprt *xprt, struct net *net);
> -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
> static void xprt_connect_status(struct rpc_task *task);
> static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
> static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
> @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
> task->tk_status = -EAGAIN;
> goto out_unlock;
> }
> + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
> + req->rq_xid = xprt_alloc_xid(xprt);
> ret = true;
> out_unlock:
> spin_unlock_bh(&xprt->transport_lock);
> @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
> out_init_req:
> xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
> xprt->num_reqs);
> + spin_unlock(&xprt->reserve_lock);
> +
> task->tk_status = 0;
> task->tk_rqstp = req;
> - xprt_request_init(task, xprt);
> - spin_unlock(&xprt->reserve_lock);
> }
> EXPORT_SYMBOL_GPL(xprt_alloc_slot);
>
> @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
> xprt->xid = prandom_u32();
> }
>
> -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> +void xprt_request_init(struct rpc_task *task)
> {
> + struct rpc_xprt *xprt = task->tk_xprt;
> struct rpc_rqst *req = task->tk_rqstp;
>
> INIT_LIST_HEAD(&req->rq_list);
> @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
> req->rq_task = task;
> req->rq_xprt = xprt;
> req->rq_buffer = NULL;
> - req->rq_xid = xprt_alloc_xid(xprt);
> req->rq_connect_cookie = xprt->connect_cookie - 1;
> req->rq_bytes_sent = 0;
> req->rq_snd_buf.len = 0;
>

2018-04-03 20:38:43

by Chuck Lever

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock



> On Apr 3, 2018, at 4:12 PM, Anna Schumaker <[email protected]> wrote:
>
> Hi Chuck,
>
> On 03/11/2018 11:27 AM, Chuck Lever wrote:
>> alloc_slot is a transport-specific op, but initializing an rpc_rqst
>> is common to all transports. In addition, the only part of initial-
>> izing an rpc_rqst that needs serialization is getting a fresh XID.
>>
>> Move rpc_rqst initialization to common code in preparation for
>> adding a transport-specific alloc_slot to xprtrdma.
>
> I'm having trouble with this patch again. This time I'm running xfstests generic/074 with NFS v4.0, and my client starts seeing "nfs: server 192.168.100.215 not responding, still trying" after about a minute. Any thoughts about what's going on here?

Random thoughts:

"nfs: server not responding" is a very generic failure. One
minute is the TCP timeout setting. NFSv4.0 clients don't
typically retransmit unless the server drops the connection.

What is your client, server, and network configuration?

Can you reproduce with NFSv3 or NFSv4.1? I assume you are
using TCP for this test.

Any other relevant messages in the client's or server's /v/l/m?


> I keep trying to capture it with Wireshark, but there are enough packets on the wire to crash Wireshark.

Use tcpdump instead, and capture into a tmpfs.


> Thoughts?
> Anna
>
>>
>> Signed-off-by: Chuck Lever <[email protected]>
>> ---
>> include/linux/sunrpc/xprt.h | 1 +
>> net/sunrpc/clnt.c | 1 +
>> net/sunrpc/xprt.c | 12 +++++++-----
>> 3 files changed, 9 insertions(+), 5 deletions(-)
>>
>> Changes since v1:
>> - Partial sends should not bump the XID
>>
>> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
>> index 5fea0fb..9784e28 100644
>> --- a/include/linux/sunrpc/xprt.h
>> +++ b/include/linux/sunrpc/xprt.h
>> @@ -324,6 +324,7 @@ struct xprt_class {
>> struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
>> void xprt_connect(struct rpc_task *task);
>> void xprt_reserve(struct rpc_task *task);
>> +void xprt_request_init(struct rpc_task *task);
>> void xprt_retry_reserve(struct rpc_task *task);
>> int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
>> int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
>> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
>> index 6e432ec..226f558 100644
>> --- a/net/sunrpc/clnt.c
>> +++ b/net/sunrpc/clnt.c
>> @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
>> task->tk_status = 0;
>> if (status >= 0) {
>> if (task->tk_rqstp) {
>> + xprt_request_init(task);
>> task->tk_action = call_refresh;
>> return;
>> }
>> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
>> index 70f0050..2d95926 100644
>> --- a/net/sunrpc/xprt.c
>> +++ b/net/sunrpc/xprt.c
>> @@ -66,7 +66,7 @@
>> * Local functions
>> */
>> static void xprt_init(struct rpc_xprt *xprt, struct net *net);
>> -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
>> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
>> static void xprt_connect_status(struct rpc_task *task);
>> static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
>> static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
>> @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
>> task->tk_status = -EAGAIN;
>> goto out_unlock;
>> }
>> + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
>> + req->rq_xid = xprt_alloc_xid(xprt);
>> ret = true;
>> out_unlock:
>> spin_unlock_bh(&xprt->transport_lock);
>> @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
>> out_init_req:
>> xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
>> xprt->num_reqs);
>> + spin_unlock(&xprt->reserve_lock);
>> +
>> task->tk_status = 0;
>> task->tk_rqstp = req;
>> - xprt_request_init(task, xprt);
>> - spin_unlock(&xprt->reserve_lock);
>> }
>> EXPORT_SYMBOL_GPL(xprt_alloc_slot);
>>
>> @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
>> xprt->xid = prandom_u32();
>> }
>>
>> -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>> +void xprt_request_init(struct rpc_task *task)
>> {
>> + struct rpc_xprt *xprt = task->tk_xprt;
>> struct rpc_rqst *req = task->tk_rqstp;
>>
>> INIT_LIST_HEAD(&req->rq_list);
>> @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>> req->rq_task = task;
>> req->rq_xprt = xprt;
>> req->rq_buffer = NULL;
>> - req->rq_xid = xprt_alloc_xid(xprt);
>> req->rq_connect_cookie = xprt->connect_cookie - 1;
>> req->rq_bytes_sent = 0;
>> req->rq_snd_buf.len = 0;
>>

--
Chuck Lever




2018-04-03 20:49:03

by Chuck Lever

Subject: Re: [PATCH v2] SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock



> On Apr 3, 2018, at 4:38 PM, Chuck Lever <[email protected]> wrote:
>
>
>
>> On Apr 3, 2018, at 4:12 PM, Anna Schumaker <[email protected]> wrote:
>>
>> Hi Chuck,
>>
>> On 03/11/2018 11:27 AM, Chuck Lever wrote:
>>> alloc_slot is a transport-specific op, but initializing an rpc_rqst
>>> is common to all transports. In addition, the only part of initial-
>>> izing an rpc_rqst that needs serialization is getting a fresh XID.
>>>
>>> Move rpc_rqst initialization to common code in preparation for
>>> adding a transport-specific alloc_slot to xprtrdma.
>>
>> I'm having trouble with this patch again. This time I'm running xfstests generic/074 with NFS v4.0, and my client starts seeing "nfs: server 192.168.100.215 not responding, still trying" after about a minute. Any thoughts about what's going on here?
>
> Random thoughts:
>
> "nfs: server not responding" is a very generic failure. One
> minute is the TCP timeout setting. NFSv4.0 clients don't
> typically retransmit unless the server drops the connection.

Another thought:

The problem before was that the client didn't recognize an XID
returned by the server because the client had overwritten the
rqst's rq_xid field. It should be straightforward to have
xprt_lookup_rqst complain about an unrecognized XID, to confirm
that this is happening again. Or there might even be a counter
somewhere that you can view with a tool like mountstats.
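
A rough sketch of the check being described, written as a hypothetical wrapper rather than a change to xprt_lookup_rqst() itself (the bad_xids counter in struct rpc_xprt does exist and is what mountstats reports; everything else here is illustrative):

#include <linux/printk.h>
#include <asm/byteorder.h>
#include <linux/sunrpc/xprt.h>

/* Illustrative: complain loudly when a reply carries an XID that no
 * outstanding request owns, the symptom seen when rq_xid is rewritten
 * after a partial send. The real lookup runs under xprt->transport_lock. */
static struct rpc_rqst *lookup_or_complain(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req = xprt_lookup_rqst(xprt, xid);

	if (!req)
		pr_warn("RPC: reply xid 0x%08x matched no request\n",
			be32_to_cpu(xid));
	return req;
}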


> What is your client, server, and network configuration?
>
> Can you reproduce with NFSv3 or NFSv4.1? I assume you are
> using TCP for this test.
>
> Any other relevant messages in the client's or server's /v/l/m?
>
>
>> I keep trying to capture it with Wireshark, but there are enough packets on the wire to crash Wireshark.
>
> Use tcpdump instead, and capture into a tmpfs.
>
>
>> Thoughts?
>> Anna
>>
>>>
>>> Signed-off-by: Chuck Lever <[email protected]>
>>> ---
>>> include/linux/sunrpc/xprt.h | 1 +
>>> net/sunrpc/clnt.c | 1 +
>>> net/sunrpc/xprt.c | 12 +++++++-----
>>> 3 files changed, 9 insertions(+), 5 deletions(-)
>>>
>>> Changes since v1:
>>> - Partial sends should not bump the XID
>>>
>>> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
>>> index 5fea0fb..9784e28 100644
>>> --- a/include/linux/sunrpc/xprt.h
>>> +++ b/include/linux/sunrpc/xprt.h
>>> @@ -324,6 +324,7 @@ struct xprt_class {
>>> struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
>>> void xprt_connect(struct rpc_task *task);
>>> void xprt_reserve(struct rpc_task *task);
>>> +void xprt_request_init(struct rpc_task *task);
>>> void xprt_retry_reserve(struct rpc_task *task);
>>> int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
>>> int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
>>> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
>>> index 6e432ec..226f558 100644
>>> --- a/net/sunrpc/clnt.c
>>> +++ b/net/sunrpc/clnt.c
>>> @@ -1546,6 +1546,7 @@ void rpc_force_rebind(struct rpc_clnt *clnt)
>>> task->tk_status = 0;
>>> if (status >= 0) {
>>> if (task->tk_rqstp) {
>>> + xprt_request_init(task);
>>> task->tk_action = call_refresh;
>>> return;
>>> }
>>> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
>>> index 70f0050..2d95926 100644
>>> --- a/net/sunrpc/xprt.c
>>> +++ b/net/sunrpc/xprt.c
>>> @@ -66,7 +66,7 @@
>>> * Local functions
>>> */
>>> static void xprt_init(struct rpc_xprt *xprt, struct net *net);
>>> -static void xprt_request_init(struct rpc_task *, struct rpc_xprt *);
>>> +static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
>>> static void xprt_connect_status(struct rpc_task *task);
>>> static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
>>> static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
>>> @@ -987,6 +987,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
>>> task->tk_status = -EAGAIN;
>>> goto out_unlock;
>>> }
>>> + if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
>>> + req->rq_xid = xprt_alloc_xid(xprt);
>>> ret = true;
>>> out_unlock:
>>> spin_unlock_bh(&xprt->transport_lock);
>>> @@ -1163,10 +1165,10 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
>>> out_init_req:
>>> xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
>>> xprt->num_reqs);
>>> + spin_unlock(&xprt->reserve_lock);
>>> +
>>> task->tk_status = 0;
>>> task->tk_rqstp = req;
>>> - xprt_request_init(task, xprt);
>>> - spin_unlock(&xprt->reserve_lock);
>>> }
>>> EXPORT_SYMBOL_GPL(xprt_alloc_slot);
>>>
>>> @@ -1303,8 +1305,9 @@ static inline void xprt_init_xid(struct rpc_xprt *xprt)
>>> xprt->xid = prandom_u32();
>>> }
>>>
>>> -static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>>> +void xprt_request_init(struct rpc_task *task)
>>> {
>>> + struct rpc_xprt *xprt = task->tk_xprt;
>>> struct rpc_rqst *req = task->tk_rqstp;
>>>
>>> INIT_LIST_HEAD(&req->rq_list);
>>> @@ -1312,7 +1315,6 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
>>> req->rq_task = task;
>>> req->rq_xprt = xprt;
>>> req->rq_buffer = NULL;
>>> - req->rq_xid = xprt_alloc_xid(xprt);
>>> req->rq_connect_cookie = xprt->connect_cookie - 1;
>>> req->rq_bytes_sent = 0;
>>> req->rq_snd_buf.len = 0;
>>>
>
> --
> Chuck Lever
>
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html

--
Chuck Lever