From: Andy Adamson <[email protected]>
Mimic the client and prepare to share the back channel xdr with NFSv4.1.
Bump the number of operations in each encode routine, then backfill the
final operation count into the compound header once encoding is complete.
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
---
fs/nfsd/nfs4callback.c | 24 ++++++++++++++++--------
1 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index f4fab69..353eb4a 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -140,8 +140,9 @@ struct nfs4_cb_compound_hdr {
int status;
u32 ident;
u32 nops;
+ __be32 *nops_p;
u32 taglen;
- char * tag;
+ char *tag;
};
static struct {
@@ -201,7 +202,7 @@ nfs_cb_stat_to_errno(int stat)
* XDR encode
*/
-static int
+static void
encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
{
__be32 * p;
@@ -210,12 +211,18 @@ encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
WRITE32(0); /* tag length is always 0 */
WRITE32(NFS4_MINOR_VERSION);
WRITE32(hdr->ident);
+ hdr->nops_p = p;
WRITE32(hdr->nops);
- return 0;
}
-static int
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp)
+static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
+{
+ *hdr->nops_p = htonl(hdr->nops);
+}
+
+static void
+encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
+ struct nfs4_cb_compound_hdr *hdr)
{
__be32 *p;
int len = dp->dl_fh.fh_size;
@@ -227,7 +234,7 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp)
WRITE32(0); /* truncate optimization not implemented */
WRITE32(len);
WRITEMEM(&dp->dl_fh.fh_base, len);
- return 0;
+ hdr->nops++;
}
static int
@@ -246,12 +253,13 @@ nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr = {
.ident = args->dl_ident,
- .nops = 1,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
- return (encode_cb_recall(&xdr, args));
+ encode_cb_recall(&xdr, args, &hdr);
+ encode_cb_nops(&hdr);
+ return 0;
}
--
1.5.4.3
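For readers new to the encode-then-backfill idiom this patch introduces, here is a minimal userspace sketch of the pattern, not the kernel code itself (names such as compound_hdr and encode_fake_op are made up): remember where the nops word lives when the compound header is encoded, let each per-op encoder bump a counter, and write the counter back over the placeholder once encoding is done.

#include <arpa/inet.h>   /* htonl, ntohl */
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the kernel's xdr_stream and nfs4_cb_compound_hdr. */
struct compound_hdr {
	uint32_t nops;      /* running count of encoded operations */
	uint32_t *nops_p;   /* where the final count must be written back */
};

static uint32_t *encode_hdr(uint32_t *p, struct compound_hdr *hdr)
{
	*p++ = htonl(0);            /* tag length */
	*p++ = htonl(0);            /* minorversion */
	*p++ = htonl(0);            /* callback ident */
	hdr->nops_p = p;            /* remember the slot ... */
	*p++ = htonl(hdr->nops);    /* ... and write a placeholder */
	return p;
}

static uint32_t *encode_fake_op(uint32_t *p, struct compound_hdr *hdr)
{
	*p++ = htonl(4);            /* pretend this is OP_CB_RECALL */
	hdr->nops++;                /* every op encoder bumps the count */
	return p;
}

static void encode_nops(struct compound_hdr *hdr)
{
	*hdr->nops_p = htonl(hdr->nops);    /* backfill the real count */
}

int main(void)
{
	uint32_t buf[16];
	struct compound_hdr hdr = { 0, NULL };
	uint32_t *p = encode_hdr(buf, &hdr);

	p = encode_fake_op(p, &hdr);
	encode_nops(&hdr);
	printf("encoded %u op(s) in %zu XDR words\n",
	       ntohl(*hdr.nops_p), (size_t)(p - buf));
	return 0;
}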
From: Andy Adamson <[email protected]>
Prepare to share backchannel code with NFSv4.1.
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: use nfsd4_cb_sequence for callback minorversion]
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4callback.c | 3 ++-
fs/nfsd/nfs4state.c | 1 +
include/linux/nfsd/state.h | 3 ++-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 353eb4a..3fd23f7 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -141,6 +141,7 @@ struct nfs4_cb_compound_hdr {
u32 ident;
u32 nops;
__be32 *nops_p;
+ u32 minorversion;
u32 taglen;
char *tag;
};
@@ -209,7 +210,7 @@ encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
RESERVE_SPACE(16);
WRITE32(0); /* tag length is always 0 */
- WRITE32(NFS4_MINOR_VERSION);
+ WRITE32(hdr->minorversion);
WRITE32(hdr->ident);
hdr->nops_p = p;
WRITE32(hdr->nops);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b276624..7158134 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -978,6 +978,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
if ( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
&cb->cb_addr, &cb->cb_port)))
goto out_err;
+ cb->cb_minorversion = 0;
cb->cb_prog = se->se_callback_prog;
cb->cb_ident = se->se_callback_ident;
return;
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index c0c4921..212a75b 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -85,7 +85,8 @@ struct nfs4_cb_conn {
u32 cb_addr;
unsigned short cb_port;
u32 cb_prog;
- u32 cb_ident;
+ u32 cb_minorversion;
+ u32 cb_ident; /* minorversion 0 only */
/* RPC client info */
atomic_t cb_set; /* successful CB_NULL call */
struct rpc_clnt * cb_client;
--
1.5.4.3
Follow the model we use in the client. Make the sequence arguments
part of the regular RPC arguments. The results point to them. Adjust
references to the sequence arguments.
Signed-off-by: Ricardo Labiaga <[email protected]>
[define struct nfsd4_cb_sequence here]
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4callback.c | 9 +++++++++
include/linux/nfsd/state.h | 9 +++++++++
2 files changed, 18 insertions(+), 0 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index f75750e..0b5c4a6 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -92,6 +92,15 @@ enum nfs_cb_opnum4 {
cb_sequence_dec_sz + \
op_dec_sz)
+struct nfs4_rpc_args {
+ void *args_op;
+ struct nfsd4_cb_sequence args_seq;
+};
+
+struct nfs4_rpc_res {
+ struct nfsd4_cb_sequence *res_seq;
+};
+
/*
* Generic encode routines from fs/nfs/nfs4xdr.c
*/
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 212a75b..931aaa6 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -61,6 +61,12 @@ typedef struct {
#define si_stateownerid si_opaque.so_stateownerid
#define si_fileid si_opaque.so_fileid
+struct nfsd4_cb_sequence {
+ /* args/res */
+ u32 cbs_minorversion;
+ struct nfs4_client *cbs_clp;
+};
+
struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
@@ -188,6 +194,9 @@ struct nfs4_client {
struct nfsd4_slot cl_slot; /* create_session slot */
u32 cl_exchange_flags;
struct nfs4_sessionid cl_sessionid;
+
+ /* for nfs41 callbacks */
+ struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
};
/* struct nfs4_client_reset
--
1.5.4.3
From: Andy Adamson <[email protected]>
Keep the xprt used for create_session in cl_cb_xprt.
Mark cl_callback.cb_minorversion = 1 and remember the client-provided
cl_callback.cb_prog rpc program number.
Use it to probe the callback path.
Define the xdr sizes and encode the nfs4_cb_compound header so that a null
callback rpc can be sent.
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
[get callback minorversion from fore channel's]
Signed-off-by: Benny Halevy <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
---
fs/nfsd/nfs4callback.c | 27 ++++++++++++++++++++++++---
fs/nfsd/nfs4state.c | 11 +++++++++++
2 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3fd23f7..f75750e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -43,6 +43,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/svcsock.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/state.h>
#include <linux/sunrpc/sched.h>
@@ -52,16 +53,19 @@
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
+#define NFS4_STATEID_SIZE 16
/* Index of predefined Linux callback client operations */
enum {
- NFSPROC4_CLNT_CB_NULL = 0,
+ NFSPROC4_CLNT_CB_NULL = 0,
NFSPROC4_CLNT_CB_RECALL,
+ NFSPROC4_CLNT_CB_SEQUENCE,
};
enum nfs_cb_opnum4 {
OP_CB_RECALL = 4,
+ OP_CB_SEQUENCE = 11,
};
#define NFS4_MAXTAGLEN 20
@@ -70,15 +74,22 @@ enum nfs_cb_opnum4 {
#define NFS4_dec_cb_null_sz 0
#define cb_compound_enc_hdr_sz 4
#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
+#define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2)
+#define cb_sequence_enc_sz (sessionid_sz + 4 + \
+ 1 /* no referring calls list yet */)
+#define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4)
+
#define op_enc_sz 1
#define op_dec_sz 2
#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
#define enc_stateid_sz (NFS4_STATEID_SIZE >> 2)
#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
+ cb_sequence_enc_sz + \
1 + enc_stateid_sz + \
enc_nfs4_fh_sz)
#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
+ cb_sequence_dec_sz + \
op_dec_sz)
/*
@@ -137,11 +148,13 @@ xdr_error: \
} while (0)
struct nfs4_cb_compound_hdr {
- int status;
- u32 ident;
+ /* args */
+ u32 ident; /* minorversion 0 only */
u32 nops;
__be32 *nops_p;
u32 minorversion;
+ /* res */
+ int status;
u32 taglen;
char *tag;
};
@@ -405,6 +418,14 @@ int setup_callback_client(struct nfs4_client *clp)
addr.sin_family = AF_INET;
addr.sin_port = htons(cb->cb_port);
addr.sin_addr.s_addr = htonl(cb->cb_addr);
+ if (cb->cb_minorversion) {
+ args.bc_sock = container_of(clp->cl_cb_xprt, struct svc_sock,
+ sk_xprt);
+ }
+
+ dprintk("%s: program %s 0x%x nrvers %u version %u minorversion %u\n",
+ __func__, args.program->name, args.prognumber,
+ args.program->nrvers, args.version, cb->cb_minorversion);
/* Create RPC client */
client = rpc_create(&args);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7158134..7d89bd0 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -653,6 +653,8 @@ free_client(struct nfs4_client *clp)
shutdown_callback_client(clp);
nfsd4_release_respages(clp->cl_slot.sl_cache_entry.ce_respages,
clp->cl_slot.sl_cache_entry.ce_resused);
+ if (clp->cl_cb_xprt)
+ svc_xprt_put(clp->cl_cb_xprt);
if (clp->cl_cred.cr_group_info)
put_group_info(clp->cl_cred.cr_group_info);
kfree(clp->cl_principal);
@@ -1396,6 +1398,15 @@ nfsd4_create_session(struct svc_rqst *rqstp,
cr_ses->flags &= ~SESSION4_PERSIST;
cr_ses->flags &= ~SESSION4_RDMA;
+ if (cr_ses->flags & SESSION4_BACK_CHAN) {
+ unconf->cl_cb_xprt = rqstp->rq_xprt;
+ svc_xprt_get(unconf->cl_cb_xprt);
+ unconf->cl_cb_conn.cb_minorversion =
+ cstate->minorversion;
+ unconf->cl_cb_conn.cb_prog = cr_ses->callback_prog;
+ unconf->cl_cb_seq_nr = 1;
+ nfsd4_probe_callback(unconf);
+ }
conf = unconf;
} else {
status = nfserr_stale_clientid;
--
1.5.4.3
From: Alexandros Batsakis <[email protected]>
Factor functionality out of svc_tcp_recvfrom() to simplify the routine.
Signed-off-by: Alexandros Batsakis <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
---
net/sunrpc/svcsock.c | 61 +++++++++++++++++++++++++++++++++++--------------
1 files changed, 43 insertions(+), 18 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4e6d406..b739111 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -815,28 +815,18 @@ failed:
}
/*
- * Receive data from a TCP socket.
+ * Receive data.
+ * If we haven't gotten the record length yet, get the next four bytes.
+ * Otherwise try to gobble up as much as possible up to the complete
+ * record length.
*/
-static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
{
- struct svc_sock *svsk =
- container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
struct svc_serv *serv = svsk->sk_xprt.xpt_server;
- int len;
- struct kvec *vec;
- int pnum, vlen;
-
- dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
- svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
- test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
+ int len;
clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
- /* Receive data. If we haven't got the record length yet, get
- * the next four bytes. Otherwise try to gobble up as much as
- * possible up to the complete record length.
- */
if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
struct kvec iov;
@@ -851,7 +841,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: short recvfrom while reading record "
"length (%d of %d)\n", len, want);
svc_xprt_received(&svsk->sk_xprt);
- return -EAGAIN; /* record header not complete */
+ goto err_again; /* record header not complete */
}
svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -866,6 +856,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
"per record not supported\n");
goto err_delete;
}
+
svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
if (svsk->sk_reclen > serv->sv_max_mesg) {
@@ -886,11 +877,45 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
dprintk("svc: incomplete TCP record (%d of %d)\n",
len, svsk->sk_reclen);
svc_xprt_received(&svsk->sk_xprt);
- return -EAGAIN; /* record not complete */
+ goto err_again; /* record not complete */
}
len = svsk->sk_reclen;
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
+ return len;
+ error:
+ if (len == -EAGAIN) {
+ dprintk("RPC: TCP recv_record got EAGAIN\n");
+ svc_xprt_received(&svsk->sk_xprt);
+ }
+ return len;
+ err_delete:
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+ err_again:
+ return -EAGAIN;
+}
+
+/*
+ * Receive data from a TCP socket.
+ */
+static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
+{
+ struct svc_sock *svsk =
+ container_of(rqstp->rq_xprt, struct svc_sock, sk_xprt);
+ struct svc_serv *serv = svsk->sk_xprt.xpt_server;
+ int len;
+ struct kvec *vec;
+ int pnum, vlen;
+
+ dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
+ svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+ test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+ test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
+
+ len = svc_tcp_recv_record(svsk, rqstp);
+ if (len < 0)
+ goto error;
+
vec = rqstp->rq_vec;
vec[0] = rqstp->rq_arg.head[0];
vlen = PAGE_SIZE;
--
1.5.4.3
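As background for the refactored svc_tcp_recv_record(): every RPC fragment on a TCP stream is preceded by a 4-byte record marker whose top bit flags the last fragment and whose low 31 bits carry the fragment length (RFC 5531, Record Marking Standard). A self-contained sketch of that parsing, independent of the kernel helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_FRAGMENT  0x80000000u   /* mirrors RPC_LAST_STREAM_FRAGMENT */
#define FRAG_SIZE_MASK 0x7fffffffu   /* mirrors RPC_FRAGMENT_SIZE_MASK */

/* Decode the 4-byte record marker read off the socket in network order. */
static void parse_record_marker(uint32_t marker_be)
{
	uint32_t marker = ntohl(marker_be);
	int last = (marker & LAST_FRAGMENT) != 0;
	uint32_t len = marker & FRAG_SIZE_MASK;

	printf("fragment of %u byte(s), %s fragment\n",
	       len, last ? "last" : "intermediate");
}

int main(void)
{
	/* A single-fragment record of 112 bytes, as it would appear on the wire. */
	parse_record_marker(htonl(LAST_FRAGMENT | 112));
	return 0;
}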
Signed-off-by: Rahul Iyer <[email protected]>
Signed-off-by: Mike Sager <[email protected]>
Signed-off-by: Marc Eshel <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
When the call direction is a reply, copy the xid and call direction into the
req->rq_private_buf.head[0].iov_base; otherwise rpc_verify_header returns
rpc_garbage.
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
[get rid of CONFIG_NFSD_V4_1]
Signed-off-by: Benny Halevy <[email protected]>
[sunrpc: refactoring of svc_tcp_recvfrom]
Signed-off-by: Alexandros Batsakis <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
---
include/linux/sunrpc/clnt.h | 1 +
include/linux/sunrpc/svcsock.h | 1 +
include/linux/sunrpc/xprt.h | 2 +
net/sunrpc/clnt.c | 1 +
net/sunrpc/svcsock.c | 102 +++++++++++++--
net/sunrpc/xprt.c | 41 ++++++-
net/sunrpc/xprtsock.c | 278 +++++++++++++++++++++++++++++++++++++++-
7 files changed, 405 insertions(+), 21 deletions(-)
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index c39a210..cf9a8ec 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -110,6 +110,7 @@ struct rpc_create_args {
rpc_authflavor_t authflavor;
unsigned long flags;
char *client_name;
+ struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
};
/* Values for "flags" field */
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 8271631..19228f4 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -28,6 +28,7 @@ struct svc_sock {
/* private TCP part */
u32 sk_reclen; /* length of record */
u32 sk_tcplen; /* current read length */
+ struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */
};
/*
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 1758d9f..063a6a7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -174,6 +174,7 @@ struct rpc_xprt {
spinlock_t reserve_lock; /* lock slot table */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+ struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
struct list_head recv;
struct {
@@ -197,6 +198,7 @@ struct xprt_create {
struct sockaddr * srcaddr; /* optional local address */
struct sockaddr * dstaddr; /* remote peer address */
size_t addrlen;
+ struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
};
struct xprt_class {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 5abab09..3dc847f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -266,6 +266,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.srcaddr = args->saddress,
.dstaddr = args->address,
.addrlen = args->addrsize,
+ .bc_sock = args->bc_sock,
};
char servername[48];
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b739111..90c9a75 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -49,6 +49,7 @@
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/xprt.h>
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
@@ -895,6 +896,57 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
return -EAGAIN;
}
+static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
+ struct rpc_rqst **reqpp, struct kvec *vec)
+{
+ struct rpc_rqst *req = NULL;
+ u32 *p;
+ u32 xid;
+ u32 calldir;
+ int len;
+
+ len = svc_recvfrom(rqstp, vec, 1, 8);
+ if (len < 0)
+ goto error;
+
+ p = (u32 *)rqstp->rq_arg.head[0].iov_base;
+ xid = *p++;
+ calldir = *p;
+
+ if (calldir == 0) {
+ /* REQUEST is the most common case */
+ vec[0] = rqstp->rq_arg.head[0];
+ } else {
+ /* REPLY */
+ if (svsk->sk_bc_xprt)
+ req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
+
+ if (!req) {
+ printk(KERN_NOTICE
+ "%s: Got unrecognized reply: "
+ "calldir 0x%x sk_bc_xprt %p xid %08x\n",
+ __func__, ntohl(calldir),
+ svsk->sk_bc_xprt, xid);
+ vec[0] = rqstp->rq_arg.head[0];
+ goto out;
+ }
+
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(struct xdr_buf));
+ /* copy the xid and call direction */
+ memcpy(req->rq_private_buf.head[0].iov_base,
+ rqstp->rq_arg.head[0].iov_base, 8);
+ vec[0] = req->rq_private_buf.head[0];
+ }
+ out:
+ vec[0].iov_base += 8;
+ vec[0].iov_len -= 8;
+ len = svsk->sk_reclen - 8;
+ error:
+ *reqpp = req;
+ return len;
+}
+
/*
* Receive data from a TCP socket.
*/
@@ -906,6 +958,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
int len;
struct kvec *vec;
int pnum, vlen;
+ struct rpc_rqst *req = NULL;
dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
@@ -919,9 +972,27 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
vec = rqstp->rq_vec;
vec[0] = rqstp->rq_arg.head[0];
vlen = PAGE_SIZE;
+
+ /*
+ * We have enough data for the whole tcp record. Let's try and read the
+ * first 8 bytes to get the xid and the call direction. We can use this
+ * to figure out if this is a call or a reply to a callback. If
+ * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
+ * In that case, don't bother with the calldir and just read the data.
+ * It will be rejected in svc_process.
+ */
+ if (len >= 8) {
+ len = svc_process_calldir(svsk, rqstp, &req, vec);
+ if (len < 0)
+ goto err_again;
+ vlen -= 8;
+ }
+
pnum = 1;
while (vlen < len) {
- vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
+ vec[pnum].iov_base = (req) ?
+ page_address(req->rq_private_buf.pages[pnum - 1]) :
+ page_address(rqstp->rq_pages[pnum]);
vec[pnum].iov_len = PAGE_SIZE;
pnum++;
vlen += PAGE_SIZE;
@@ -931,8 +1002,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
/* Now receive data */
len = svc_recvfrom(rqstp, vec, pnum, len);
if (len < 0)
- goto error;
+ goto err_again;
+
+ /*
+ * Account for the 8 bytes we read earlier
+ */
+ len += 8;
+ if (req) {
+ xprt_complete_rqst(req->rq_task, len);
+ len = 0;
+ goto out;
+ }
dprintk("svc: TCP complete record (%d bytes)\n", len);
rqstp->rq_arg.len = len;
rqstp->rq_arg.page_base = 0;
@@ -946,6 +1027,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
+out:
/* Reset TCP read info */
svsk->sk_reclen = 0;
svsk->sk_tcplen = 0;
@@ -957,21 +1039,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
return len;
- err_delete:
- set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
- return -EAGAIN;
-
- error:
+err_again:
if (len == -EAGAIN) {
dprintk("RPC: TCP recvfrom got EAGAIN\n");
svc_xprt_received(&svsk->sk_xprt);
- } else {
+ return len;
+ }
+error:
+ if (len != -EAGAIN) {
printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
svsk->sk_xprt.xpt_server->sv_name, -len);
- goto err_delete;
+ set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
}
-
- return len;
+ return -EAGAIN;
}
/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a0bfe53..03f175e 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1015,6 +1015,27 @@ void xprt_release(struct rpc_task *task)
spin_unlock(&xprt->reserve_lock);
}
+/*
+ * The autoclose function for the back channel
+ *
+ * The callback channel should never close the channel,
+ * let the forechannel do that.
+ */
+static void bc_autoclose(struct work_struct *work)
+{
+ return;
+}
+
+
+/*
+ * The autodisconnect routine for the back channel. We never disconnect
+ */
+static void
+bc_init_autodisconnect(unsigned long data)
+{
+ return;
+}
+
/**
* xprt_create_transport - create an RPC transport
* @args: rpc transport creation arguments
@@ -1051,9 +1072,16 @@ found:
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
- INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
- setup_timer(&xprt->timer, xprt_init_autodisconnect,
- (unsigned long)xprt);
+ if (args->bc_sock) {
+ INIT_WORK(&xprt->task_cleanup, bc_autoclose);
+ setup_timer(&xprt->timer, bc_init_autodisconnect,
+ (unsigned long)xprt);
+ } else {
+ INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
+ setup_timer(&xprt->timer, xprt_init_autodisconnect,
+ (unsigned long)xprt);
+ }
+
xprt->last_used = jiffies;
xprt->cwnd = RPC_INITCWND;
xprt->bind_index = 0;
@@ -1073,6 +1101,13 @@ found:
dprintk("RPC: created transport %p with %u slots\n", xprt,
xprt->max_reqs);
+ /*
+ * Since we don't want connections for the backchannel, we set
+ * the xprt status to connected
+ */
+ if (args->bc_sock)
+ xprt_set_connected(xprt);
+
return xprt;
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d40ff50..067d205 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
@@ -1966,6 +1967,219 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
xprt->stat.bklog_u);
}
+/*
+ * The connect worker for the backchannel
+ * This should never be called as we should never need to connect
+ */
+static void bc_connect_worker(struct work_struct *work)
+{
+ BUG();
+}
+
+/*
+ * The set_port routine of the rpc_xprt_ops. This is related to the portmapper
+ * and should never be called
+ */
+
+static void bc_set_port(struct rpc_xprt *xprt, unsigned short port)
+{
+ BUG();
+}
+
+/*
+ * The connect routine for the backchannel rpc_xprt ops
+ * Again, should never be called!
+ */
+
+static void bc_connect(struct rpc_task *task)
+{
+ BUG();
+}
+
+struct rpc_buffer {
+ size_t len;
+ char data[];
+};
+/*
+ * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
+ * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
+ * to use the server side send routines.
+ */
+void *bc_malloc(struct rpc_task *task, size_t size)
+{
+ struct page *page;
+ struct rpc_buffer *buf;
+
+ BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
+ page = alloc_page(GFP_KERNEL);
+
+ if (!page)
+ return NULL;
+
+ buf = page_address(page);
+ buf->len = PAGE_SIZE;
+
+ return buf->data;
+}
+
+/*
+ * Free the space allocated in the bc_alloc routine
+ */
+void bc_free(void *buffer)
+{
+ struct rpc_buffer *buf;
+
+ if (!buffer)
+ return;
+
+ buf = container_of(buffer, struct rpc_buffer, data);
+ free_pages((unsigned long)buf, get_order(buf->len));
+}
+
+/*
+ * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
+ * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
+ */
+static int bc_sendto(struct rpc_rqst *req)
+{
+ int total_len;
+ int len;
+ int size;
+ int result;
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+ struct page **pages = xbufp->pages;
+ unsigned int flags = MSG_MORE;
+ unsigned int pglen = xbufp->page_len;
+ size_t base = xbufp->page_base;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct socket *sock = transport->sock;
+
+ total_len = xbufp->len;
+
+ /*
+ * Set up the rpc header and record marker stuff
+ */
+ xs_encode_tcp_record_marker(xbufp);
+
+ /*
+ * The RPC message is divided into 3 pieces:
+ * - The header: This is what most of the smaller RPC messages consist
+ * of. Often the whole message is in this.
+ *
+ * - xdr->pages: This is a list of pages that contain data, for
+ * example in a write request or while using rpcsec gss
+ *
+ * - The tail: This is the rest of the rpc message
+ *
+ * First we send the header, then the pages and then finally the tail.
+ * The code borrows heavily from svc_sendto.
+ */
+
+ /*
+ * Send the head
+ */
+ if (total_len == xbufp->head[0].iov_len)
+ flags = 0;
+
+ len = sock->ops->sendpage(sock, virt_to_page(xbufp->head[0].iov_base),
+ (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK,
+ xbufp->head[0].iov_len, flags);
+
+ if (len != xbufp->head[0].iov_len)
+ goto out;
+
+ /*
+ * send page data
+ *
+ * Check the amount of data to be sent. If it is less than the
+ * remaining page, then send it else send the current page
+ */
+
+ size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
+ while (pglen > 0) {
+ if (total_len == size)
+ flags = 0;
+ result = sock->ops->sendpage(sock, *pages, base, size, flags);
+ if (result > 0)
+ len += result;
+ if (result != size)
+ goto out;
+ total_len -= size;
+ pglen -= size;
+ size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
+ base = 0;
+ pages++;
+ }
+ /*
+ * send tail
+ */
+ if (xbufp->tail[0].iov_len) {
+ result = sock->ops->sendpage(sock,
+ xbufp->tail[0].iov_base,
+ (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK,
+ xbufp->tail[0].iov_len,
+ 0);
+
+ if (result > 0)
+ len += result;
+ }
+out:
+ if (len != xbufp->len)
+ printk(KERN_NOTICE "Error sending entire callback!\n");
+
+ return len;
+}
+
+/*
+ * The send routine. Borrows from svc_send
+ */
+static int bc_send_request(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *bc_xprt = req->rq_xprt;
+ struct svc_xprt *xprt;
+ struct svc_sock *svsk;
+ u32 len;
+
+ dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
+ /*
+ * Get the server socket associated with this callback xprt
+ */
+ svsk = bc_xprt->bc_sock;
+ xprt = &svsk->sk_xprt;
+
+ mutex_lock(&xprt->xpt_mutex);
+ if (test_bit(XPT_DEAD, &xprt->xpt_flags))
+ len = -ENOTCONN;
+ else
+ len = bc_sendto(req);
+ mutex_unlock(&xprt->xpt_mutex);
+
+ return 0;
+
+}
+
+/*
+ * The close routine. Since this is client initiated, we do nothing
+ */
+
+static void bc_close(struct rpc_xprt *xprt)
+{
+ return;
+}
+
+/*
+ * The xprt destroy routine. Again, because this connection is client
+ * initiated, we do nothing
+ */
+
+static void bc_destroy(struct rpc_xprt *xprt)
+{
+ return;
+}
+
static struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
@@ -1999,6 +2213,24 @@ static struct rpc_xprt_ops xs_tcp_ops = {
.print_stats = xs_tcp_print_stats,
};
+/*
+ * The rpc_xprt_ops for the server backchannel
+ */
+
+static struct rpc_xprt_ops bc_tcp_ops = {
+ .reserve_xprt = xprt_reserve_xprt,
+ .release_xprt = xprt_release_xprt,
+ .set_port = bc_set_port,
+ .connect = bc_connect,
+ .buf_alloc = bc_malloc,
+ .buf_free = bc_free,
+ .send_request = bc_send_request,
+ .set_retrans_timeout = xprt_set_retrans_timeout_def,
+ .close = bc_close,
+ .destroy = bc_destroy,
+ .print_stats = xs_tcp_print_stats,
+};
+
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
unsigned int slot_table_size)
{
@@ -2131,13 +2363,29 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
- xprt->bind_timeout = XS_BIND_TO;
- xprt->connect_timeout = XS_TCP_CONN_TO;
- xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
- xprt->idle_timeout = XS_IDLE_DISC_TO;
+ if (args->bc_sock) {
+ /* backchannel */
+ xprt_set_bound(xprt);
+ INIT_DELAYED_WORK(&transport->connect_worker,
+ bc_connect_worker);
+ xprt->bind_timeout = 0;
+ xprt->connect_timeout = 0;
+ xprt->reestablish_timeout = 0;
+ xprt->idle_timeout = (~0);
- xprt->ops = &xs_tcp_ops;
- xprt->timeout = &xs_tcp_default_timeout;
+ /*
+ * The backchannel uses the same socket connection as the
+ * forechannel
+ */
+ xprt->bc_sock = args->bc_sock;
+ xprt->bc_sock->sk_bc_xprt = xprt;
+ transport->sock = xprt->bc_sock->sk_sock;
+ transport->inet = xprt->bc_sock->sk_sk;
+
+ xprt->ops = &bc_tcp_ops;
+
+ goto next;
+ }
switch (addr->sa_family) {
case AF_INET:
@@ -2145,13 +2393,29 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4);
- xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
break;
case AF_INET6:
if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
xprt_set_bound(xprt);
INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6);
+ break;
+ }
+ xprt->bind_timeout = XS_BIND_TO;
+ xprt->connect_timeout = XS_TCP_CONN_TO;
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ xprt->idle_timeout = XS_IDLE_DISC_TO;
+
+ xprt->ops = &xs_tcp_ops;
+
+next:
+ xprt->timeout = &xs_tcp_default_timeout;
+
+ switch (addr->sa_family) {
+ case AF_INET:
+ xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
+ break;
+ case AF_INET6:
xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
break;
default:
--
1.5.4.3
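The heart of the shared-socket trick above is that the first two XDR words of a record are the xid and the call direction (CALL = 0, REPLY = 1), which lets the server distinguish a new request from the reply to a callback it sent earlier. A self-contained sketch of that 8-byte peek; the buffer and names are illustrative, not the kernel structures:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

enum { RPC_DIR_CALL = 0, RPC_DIR_REPLY = 1 };

/*
 * Look at the first 8 bytes of a TCP RPC record: the xid, then the call
 * direction.  CALL means a normal fore-channel request; REPLY means this
 * is the answer to a callback the server itself issued.
 */
static void classify_record(const uint32_t *head)
{
	uint32_t xid = ntohl(head[0]);
	uint32_t calldir = ntohl(head[1]);

	if (calldir == RPC_DIR_CALL)
		printf("xid %08x: fore-channel request\n", xid);
	else if (calldir == RPC_DIR_REPLY)
		printf("xid %08x: backchannel reply, look up the pending callback\n", xid);
	else
		printf("xid %08x: malformed call direction %u\n", xid, calldir);
}

int main(void)
{
	uint32_t reply_hdr[2] = { htonl(0x1234abcd), htonl(RPC_DIR_REPLY) };

	classify_record(reply_hdr);
	return 0;
}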
RPC callback requests will wait on this wait queue if the backchannel
is out of slots.
Signed-off-by: Ricardo Labiaga <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4state.c | 2 ++
include/linux/nfsd/state.h | 4 ++++
2 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7d89bd0..45cbbbc 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -726,6 +726,8 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir)
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_sessions);
INIT_LIST_HEAD(&clp->cl_lru);
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
return clp;
}
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 931aaa6..261cd73 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -196,7 +196,11 @@ struct nfs4_client {
struct nfs4_sessionid cl_sessionid;
/* for nfs41 callbacks */
+ /* We currently support a single back channel with a single slot */
+ unsigned long cl_cb_slot_busy;
struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
+ struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
+ /* wait here for slots */
};
/* struct nfs4_client_reset
--
1.5.4.3
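The single backchannel slot described above behaves like a one-bit semaphore: a callback atomically tests and sets the busy bit, sleeps on cl_cb_waitq if it was already set, and on completion clears the bit and wakes the next waiter. A userspace analogue using C11 atomics (the kernel code uses test_and_set_bit()/clear_bit() and an rpc_wait_queue rather than these calls):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag slot_busy = ATOMIC_FLAG_INIT;   /* analogue of cl_cb_slot_busy */

/* Try to claim the single callback slot; returns 0 on success, -1 if busy. */
static int take_slot(void)
{
	if (atomic_flag_test_and_set(&slot_busy))
		return -1;   /* the kernel would rpc_sleep_on(&clp->cl_cb_waitq, ...) here */
	return 0;
}

/* Release the slot; the kernel would also rpc_wake_up_next(&clp->cl_cb_waitq). */
static void release_slot(void)
{
	atomic_flag_clear(&slot_busy);
}

int main(void)
{
	printf("first take:    %s\n", take_slot() == 0 ? "got slot" : "busy, must wait");
	printf("second take:   %s\n", take_slot() == 0 ? "got slot" : "busy, must wait");
	release_slot();
	printf("after release: %s\n", take_slot() == 0 ? "got slot" : "busy, must wait");
	return 0;
}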
Signed-off-by: Ricardo Labiaga <[email protected]>
[nfsd41: cb_recall callback]
[Share v4.0 and v4.1 back channel xdr]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
[Share v4.0 and v4.1 back channel xdr]
Signed-off-by: Andy Adamson <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: use nfsd4_cb_sequence for callback minorversion]
[nfsd41: conditionally decode_sequence in nfs4_xdr_dec_cb_recall]
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: Backchannel: Add sequence arguments to callback RPC arguments]
Signed-off-by: Ricardo Labiaga <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4callback.c | 40 ++++++++++++++++++++++++++++++++++++----
1 files changed, 36 insertions(+), 4 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 521d5f5..b25dcc2 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -292,15 +292,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
}
static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
+nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
+ struct nfs4_rpc_args *rpc_args)
{
struct xdr_stream xdr;
+ struct nfs4_delegation *args = rpc_args->args_op;
struct nfs4_cb_compound_hdr hdr = {
.ident = args->dl_ident,
+ .minorversion = rpc_args->args_seq.cbs_minorversion,
};
xdr_init_encode(&xdr, &req->rq_snd_buf, p);
encode_cb_compound_hdr(&xdr, &hdr);
+ encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
encode_cb_recall(&xdr, args, &hdr);
encode_cb_nops(&hdr);
return 0;
@@ -400,7 +404,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
}
static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
+nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
+ struct nfs4_rpc_res *rpc_res)
{
struct xdr_stream xdr;
struct nfs4_cb_compound_hdr hdr;
@@ -410,6 +415,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
status = decode_cb_compound_hdr(&xdr, &hdr);
if (status)
goto out;
+ if (rpc_res && rpc_res->res_seq) {
+ status = decode_cb_sequence(&xdr, rpc_res->res_seq, rqstp);
+ if (status)
+ goto out;
+ }
status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
return status;
@@ -687,6 +697,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
struct nfs4_delegation *dp = calldata;
struct nfs4_client *clp = dp->dl_client;
+ nfsd4_cb_done(task, calldata);
+
switch (task->tk_status) {
case -EIO:
/* Network partition? */
@@ -699,16 +711,20 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
break;
default:
/* success, or error we can't handle */
- return;
+ goto done;
}
if (dp->dl_retries--) {
rpc_delay(task, 2*HZ);
task->tk_status = 0;
rpc_restart_call(task);
+ return;
} else {
atomic_set(&clp->cl_cb_conn.cb_set, 0);
warn_no_callback_path(clp, task->tk_status);
}
+done:
+ kfree(task->tk_msg.rpc_argp);
+ kfree(task->tk_msg.rpc_resp);
}
static void nfsd4_cb_recall_release(void *calldata)
@@ -734,16 +750,32 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
{
struct nfs4_client *clp = dp->dl_client;
struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
+ struct nfs4_rpc_args *args;
+ struct nfs4_rpc_res *res;
struct rpc_message msg = {
.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
- .rpc_argp = dp,
.rpc_cred = clp->cl_cb_conn.cb_cred
};
int status;
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args) {
+ status = -ENOMEM;
+ goto out;
+ }
+ res = kzalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(args);
+ status = -ENOMEM;
+ goto out;
+ }
+ args->args_op = dp;
+ msg.rpc_argp = args;
+ msg.rpc_resp = res;
dp->dl_retries = 1;
status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
&nfsd4_cb_recall_ops, dp);
+out:
if (status) {
put_nfs4_client(clp);
nfs4_put_delegation(dp);
--
1.5.4.3
From: Benny Halevy <[email protected]>
Implement the cb_sequence callback conforming to draft-ietf-nfsv4-minorversion1
Note: highest slot id and target highest slot id do not have to be 0
as was previously implemented. They can be greater than what the
nfs server sent if the client supports a larger slot table on the
backchannel. At this point we just ignore that.
Signed-off-by: Benny Halevy <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
[Rework the back channel xdr using the shared v4.0 and v4.1 framework.]
Signed-off-by: Andy Adamson <[email protected]>
[fixed indentation]
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: use nfsd4_cb_sequence for callback minorversion]
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: fix verification of CB_SEQUENCE highest slot id]
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: Backchannel: Remove old backchannel serialization]
[nfsd41: Backchannel: First callback sequence ID should be 1]
Signed-off-by: Ricardo Labiaga <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
[nfsd41: decode_cb_sequence does not need to actually decode ignored fields]
Signed-off-by: Benny Halevy <[email protected]>
Signed-off-by: Ricardo Labiaga <[email protected]>
---
fs/nfsd/nfs4callback.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++
1 files changed, 72 insertions(+), 0 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index b739cc9..521d5f5 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -260,6 +260,27 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
hdr->nops++;
}
+static void
+encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *args,
+ struct nfs4_cb_compound_hdr *hdr)
+{
+ __be32 *p;
+
+ if (hdr->minorversion == 0)
+ return;
+
+ RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+
+ WRITE32(OP_CB_SEQUENCE);
+ WRITEMEM(args->cbs_clp->cl_sessionid.data, NFS4_MAX_SESSIONID_LEN);
+ WRITE32(args->cbs_clp->cl_cb_seq_nr);
+ WRITE32(0); /* slotid, always 0 */
+ WRITE32(0); /* highest slotid always 0 */
+ WRITE32(0); /* cachethis always 0 */
+ WRITE32(0); /* FIXME: support referring_call_lists */
+ hdr->nops++;
+}
+
static int
nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
{
@@ -321,6 +342,57 @@ decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
return 0;
}
+/*
+ * Our current back channel implementation supports a single backchannel
+ * with a single slot.
+ */
+static int
+decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_cb_sequence *res,
+ struct rpc_rqst *rqstp)
+{
+ struct nfs4_sessionid id;
+ int status;
+ u32 dummy;
+ __be32 *p;
+
+ if (res->cbs_minorversion == 0)
+ return 0;
+
+ status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
+ if (status)
+ return status;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -ESERVERFAULT;
+
+ READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+ memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+ p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
+ if (memcmp(id.data, res->cbs_clp->cl_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out;
+ }
+ READ32(dummy);
+ if (dummy != res->cbs_clp->cl_cb_seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out;
+ }
+ READ32(dummy); /* slotid must be 0 */
+ if (dummy != 0) {
+ dprintk("%s Invalid slotid\n", __func__);
+ goto out;
+ }
+ /* FIXME: process highest slotid and target highest slotid */
+ status = 0;
+out:
+ return status;
+}
+
+
static int
nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
{
--
1.5.4.3
Move common initialization of 'struct nfs4_client' inside create_client().
Signed-off-by: Ricardo Labiaga <[email protected]>
[nfsd41: Remember the auth flavor to use for callbacks]
Signed-off-by: Ricardo Labiaga <[email protected]>
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4state.c | 90 +++++++++++++++++++++++++-------------------------
1 files changed, 45 insertions(+), 45 deletions(-)
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 45cbbbc..1e4740f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -710,27 +710,6 @@ expire_client(struct nfs4_client *clp)
put_nfs4_client(clp);
}
-static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir)
-{
- struct nfs4_client *clp;
-
- clp = alloc_client(name);
- if (clp == NULL)
- return NULL;
- memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
- atomic_set(&clp->cl_count, 1);
- atomic_set(&clp->cl_cb_conn.cb_set, 0);
- INIT_LIST_HEAD(&clp->cl_idhash);
- INIT_LIST_HEAD(&clp->cl_strhash);
- INIT_LIST_HEAD(&clp->cl_openowners);
- INIT_LIST_HEAD(&clp->cl_delegations);
- INIT_LIST_HEAD(&clp->cl_sessions);
- INIT_LIST_HEAD(&clp->cl_lru);
- clear_bit(0, &clp->cl_cb_slot_busy);
- rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
- return clp;
-}
-
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
memcpy(target->cl_verifier.data, source->data,
@@ -793,6 +772,46 @@ static void gen_confirm(struct nfs4_client *clp)
*p++ = i++;
}
+static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
+ struct svc_rqst *rqstp, nfs4_verifier *verf)
+{
+ struct nfs4_client *clp;
+ u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
+ char *princ;
+
+ clp = alloc_client(name);
+ if (clp == NULL)
+ return NULL;
+
+ princ = svc_gss_principal(rqstp);
+ if (princ) {
+ clp->cl_principal = kstrdup(princ, GFP_KERNEL);
+ if (clp->cl_principal == NULL) {
+ free_client(clp);
+ return NULL;
+ }
+ }
+
+ memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
+ atomic_set(&clp->cl_count, 1);
+ atomic_set(&clp->cl_cb_conn.cb_set, 0);
+ INIT_LIST_HEAD(&clp->cl_idhash);
+ INIT_LIST_HEAD(&clp->cl_strhash);
+ INIT_LIST_HEAD(&clp->cl_openowners);
+ INIT_LIST_HEAD(&clp->cl_delegations);
+ INIT_LIST_HEAD(&clp->cl_sessions);
+ INIT_LIST_HEAD(&clp->cl_lru);
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
+ copy_verf(clp, verf);
+ clp->cl_addr = ip_addr;
+ clp->cl_flavor = rqstp->rq_flavor;
+ copy_cred(&clp->cl_cred, &rqstp->rq_cred);
+ gen_confirm(clp);
+
+ return clp;
+}
+
static int check_name(struct xdr_netobj name)
{
if (name.len == 0)
@@ -1285,17 +1304,13 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
out_new:
/* Normal case */
- new = create_client(exid->clname, dname);
+ new = create_client(exid->clname, dname, rqstp, &verf);
if (new == NULL) {
status = nfserr_resource;
goto out;
}
- copy_verf(new, &verf);
- copy_cred(&new->cl_cred, &rqstp->rq_cred);
- new->cl_addr = ip_addr;
gen_clid(new);
- gen_confirm(new);
add_to_unconfirmed(new, strhashval);
out_copy:
exid->clientid.cl_boot = new->cl_clientid.cl_boot;
@@ -1535,7 +1550,6 @@ __be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_setclientid *setclid)
{
- struct sockaddr_in *sin = svc_addr_in(rqstp);
struct xdr_netobj clname = {
.len = setclid->se_namelen,
.data = setclid->se_name,
@@ -1544,7 +1558,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
unsigned int strhashval;
struct nfs4_client *conf, *unconf, *new;
__be32 status;
- char *princ;
char dname[HEXDIR_LEN];
if (!check_name(clname))
@@ -1586,7 +1599,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
if (unconf)
expire_client(unconf);
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
@@ -1603,7 +1616,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
*/
expire_client(unconf);
}
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
copy_clid(new, conf);
@@ -1613,7 +1626,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* probable client reboot; state will be removed if
* confirmed.
*/
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
@@ -1624,24 +1637,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* confirmed.
*/
expire_client(unconf);
- new = create_client(clname, dname);
+ new = create_client(clname, dname, rqstp, &clverifier);
if (new == NULL)
goto out;
gen_clid(new);
}
- copy_verf(new, &clverifier);
- new->cl_addr = sin->sin_addr.s_addr;
- new->cl_flavor = rqstp->rq_flavor;
- princ = svc_gss_principal(rqstp);
- if (princ) {
- new->cl_principal = kstrdup(princ, GFP_KERNEL);
- if (new->cl_principal == NULL) {
- free_client(new);
- goto out;
- }
- }
- copy_cred(&new->cl_cred, &rqstp->rq_cred);
- gen_confirm(new);
gen_callback(new, setclid);
add_to_unconfirmed(new, strhashval);
setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
--
1.5.4.3
Follows the model used by the NFS client. Set up the RPC prepare and done
function pointers so that we can populate the sequence information if
minorversion == 1. rpc_run_task() is then invoked directly, just like
existing NFS client operations do.
nfsd4_cb_prepare() determines whether the sequence information needs to be set up.
If the slot is in use, it adds itself to the wait queue.
nfsd4_cb_done() wakes anyone sleeping on the callback channel wait queue
after our RPC reply has been received.
Signed-off-by: Ricardo Labiaga <[email protected]>
[define cl_cb_seq_nr here]
Signed-off-by: Benny Halevy <[email protected]>
---
fs/nfsd/nfs4callback.c | 78 ++++++++++++++++++++++++++++++++++++++++++++
include/linux/nfsd/state.h | 1 +
2 files changed, 79 insertions(+), 0 deletions(-)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 0b5c4a6..b739cc9 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -533,6 +533,83 @@ nfsd4_probe_callback(struct nfs4_client *clp)
do_probe_callback(clp);
}
+/*
+ * There's currently a single callback channel slot.
+ * If the slot is available, then mark it busy. Otherwise, set the
+ * thread for sleeping on the callback RPC wait queue.
+ */
+static int nfsd41_cb_setup_sequence(struct nfs4_client *clp,
+ struct rpc_task *task)
+{
+ struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
+ struct nfs4_rpc_res *res = task->tk_msg.rpc_resp;
+ u32 *ptr = (u32 *)clp->cl_sessionid.data;
+ int status = 0;
+
+ dprintk("%s: %u:%u:%u:%u\n", __func__,
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+
+ if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+ rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
+ dprintk("%s slot is busy\n", __func__);
+ status = -EAGAIN;
+ goto out;
+ }
+
+ /* We'll need the clp during XDR encoding and decoding */
+ args->args_seq.cbs_clp = clp;
+ res->res_seq = &args->args_seq;
+
+out:
+ dprintk("%s status=%d\n", __func__, status);
+ return status;
+}
+
+/*
+ * TODO: cb_sequence should support referring call lists, cachethis, multiple
+ * slots, and mark callback channel down on communication errors.
+ */
+static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+ struct nfs4_rpc_args *args = task->tk_msg.rpc_argp;
+ u32 minorversion = clp->cl_cb_conn.cb_minorversion;
+ int status = 0;
+
+ args->args_seq.cbs_minorversion = minorversion;
+ if (minorversion) {
+ status = nfsd41_cb_setup_sequence(clp, task);
+ if (status) {
+ if (status != -EAGAIN) {
+ /* terminate rpc task */
+ task->tk_status = status;
+ task->tk_action = NULL;
+ }
+ return;
+ }
+ }
+ rpc_call_start(task);
+}
+
+static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegation *dp = calldata;
+ struct nfs4_client *clp = dp->dl_client;
+
+ dprintk("%s: minorversion=%d\n", __func__,
+ clp->cl_cb_conn.cb_minorversion);
+
+ if (clp->cl_cb_conn.cb_minorversion) {
+ /* No need for lock, access serialized in nfsd4_cb_prepare */
+ ++clp->cl_cb_seq_nr;
+ clear_bit(0, &clp->cl_cb_slot_busy);
+ rpc_wake_up_next(&clp->cl_cb_waitq);
+ dprintk("%s: freed slot, new seqid=%d\n", __func__,
+ clp->cl_cb_seq_nr);
+ }
+}
+
static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegation *dp = calldata;
@@ -572,6 +649,7 @@ static void nfsd4_cb_recall_release(void *calldata)
}
static const struct rpc_call_ops nfsd4_cb_recall_ops = {
+ .rpc_call_prepare = nfsd4_cb_prepare,
.rpc_call_done = nfsd4_cb_recall_done,
.rpc_release = nfsd4_cb_recall_release,
};
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 261cd73..9191497 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -198,6 +198,7 @@ struct nfs4_client {
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
unsigned long cl_cb_slot_busy;
+ u32 cl_cb_seq_nr;
struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
/* wait here for slots */
--
1.5.4.3
On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]> wrote:
> Follow the model we use in the client. Make the sequence arguments
> part of the regular RPC arguments. The results point to them. Adjust
> references to the sequence arguments.
>
> Signed-off-by: Ricardo Labiaga <[email protected]>
> [define struct nfsd4_cb_sequence here]
> Signed-off-by: Benny Halevy <[email protected]>
> ---
> fs/nfsd/nfs4callback.c | 9 +++++++++
> include/linux/nfsd/state.h | 9 +++++++++
> 2 files changed, 18 insertions(+), 0 deletions(-)
>
> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> index f75750e..0b5c4a6 100644
> --- a/fs/nfsd/nfs4callback.c
> +++ b/fs/nfsd/nfs4callback.c
> @@ -92,6 +92,15 @@ enum nfs_cb_opnum4 {
> cb_sequence_dec_sz + \
> op_dec_sz)
>
> +struct nfs4_rpc_args {
> + void *args_op;
> + struct nfsd4_cb_sequence args_seq;
> +};
> +
> +struct nfs4_rpc_res {
> + struct nfsd4_cb_sequence *res_seq;
Given the size of nfsd4_cb_sequence, why not just embed it here?
(Historically, it didn't have the cbs_minorversion member, so
its presence was the actual indication of the minorversion.)
Now that this has changed, I see no good reason not to embed it
(see the sketch after the quoted patch below).
Benny
> +};
> +
> /*
> * Generic encode routines from fs/nfs/nfs4xdr.c
> */
> diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
> index 212a75b..931aaa6 100644
> --- a/include/linux/nfsd/state.h
> +++ b/include/linux/nfsd/state.h
> @@ -61,6 +61,12 @@ typedef struct {
> #define si_stateownerid si_opaque.so_stateownerid
> #define si_fileid si_opaque.so_fileid
>
> +struct nfsd4_cb_sequence {
> + /* args/res */
> + u32 cbs_minorversion;
> + struct nfs4_client *cbs_clp;
> +};
> +
> struct nfs4_delegation {
> struct list_head dl_perfile;
> struct list_head dl_perclnt;
> @@ -188,6 +194,9 @@ struct nfs4_client {
> struct nfsd4_slot cl_slot; /* create_session slot */
> u32 cl_exchange_flags;
> struct nfs4_sessionid cl_sessionid;
> +
> + /* for nfs41 callbacks */
> + struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
> };
>
> /* struct nfs4_client_reset
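A sketch of what the embedding suggested above could look like, with a typedef standing in for the kernel's u32 and the client struct left opaque; this is illustrative, not the merged code:

typedef unsigned int u32;    /* stand-in for the kernel typedef */
struct nfs4_client;          /* opaque here */

struct nfsd4_cb_sequence {
	/* args/res */
	u32 cbs_minorversion;
	struct nfs4_client *cbs_clp;
};

struct nfs4_rpc_args {
	void *args_op;
	struct nfsd4_cb_sequence args_seq;
};

/*
 * The proposal: embed the sequence results rather than carrying a pointer,
 * so the decode path no longer needs a res_seq NULL check.
 */
struct nfs4_rpc_res {
	struct nfsd4_cb_sequence res_seq;
};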
On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]> wrote:
> Signed-off-by: Ricardo Labiaga <[email protected]>
> [nfsd41: cb_recall callback]
> [Share v4.0 and v4.1 back channel xdr]
> Signed-off-by: Andy Adamson <[email protected]>
> Signed-off-by: Ricardo Labiaga <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
> [Share v4.0 and v4.1 back channel xdr]
> Signed-off-by: Andy Adamson <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
> [nfsd41: use nfsd4_cb_sequence for callback minorversion]
> [nfsd41: conditionally decode_sequence in nfs4_xdr_dec_cb_recall]
> Signed-off-by: Benny Halevy <[email protected]>
> [nfsd41: Backchannel: Add sequence arguments to callback RPC arguments]
> Signed-off-by: Ricardo Labiaga <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
> ---
> fs/nfsd/nfs4callback.c | 40 ++++++++++++++++++++++++++++++++++++----
> 1 files changed, 36 insertions(+), 4 deletions(-)
>
> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> index 521d5f5..b25dcc2 100644
> --- a/fs/nfsd/nfs4callback.c
> +++ b/fs/nfsd/nfs4callback.c
> @@ -292,15 +292,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
> }
>
> static int
> -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
> +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
> + struct nfs4_rpc_args *rpc_args)
> {
> struct xdr_stream xdr;
> + struct nfs4_delegation *args = rpc_args->args_op;
> struct nfs4_cb_compound_hdr hdr = {
> .ident = args->dl_ident,
> + .minorversion = rpc_args->args_seq.cbs_minorversion,
> };
>
> xdr_init_encode(&xdr, &req->rq_snd_buf, p);
> encode_cb_compound_hdr(&xdr, &hdr);
> + encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
> encode_cb_recall(&xdr, args, &hdr);
> encode_cb_nops(&hdr);
> return 0;
> @@ -400,7 +404,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
> }
>
> static int
> -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
> +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
> + struct nfs4_rpc_res *rpc_res)
> {
> struct xdr_stream xdr;
> struct nfs4_cb_compound_hdr hdr;
> @@ -410,6 +415,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
> status = decode_cb_compound_hdr(&xdr, &hdr);
> if (status)
> goto out;
> + if (rpc_res && rpc_res->res_seq) {
With this version rpc_res != NULL is guaranteed, isn't it?
Also, embedding res_seq in nfs4_rpc_res would make this check unnecessary altogether.
> + status = decode_cb_sequence(&xdr, rpc_res->res_seq, rqstp);
> + if (status)
> + goto out;
> + }
> status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
> out:
> return status;
> @@ -687,6 +697,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
> struct nfs4_delegation *dp = calldata;
> struct nfs4_client *clp = dp->dl_client;
>
> + nfsd4_cb_done(task, calldata);
> +
> switch (task->tk_status) {
> case -EIO:
> /* Network partition? */
> @@ -699,16 +711,20 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
> break;
> default:
> /* success, or error we can't handle */
> - return;
> + goto done;
> }
> if (dp->dl_retries--) {
> rpc_delay(task, 2*HZ);
> task->tk_status = 0;
> rpc_restart_call(task);
> + return;
> } else {
> atomic_set(&clp->cl_cb_conn.cb_set, 0);
> warn_no_callback_path(clp, task->tk_status);
> }
> +done:
> + kfree(task->tk_msg.rpc_argp);
> + kfree(task->tk_msg.rpc_resp);
> }
>
> static void nfsd4_cb_recall_release(void *calldata)
> @@ -734,16 +750,32 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
> {
> struct nfs4_client *clp = dp->dl_client;
> struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
> + struct nfs4_rpc_args *args;
> + struct nfs4_rpc_res *res;
> struct rpc_message msg = {
> .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
> - .rpc_argp = dp,
> .rpc_cred = clp->cl_cb_conn.cb_cred
> };
> int status;
>
> + args = kzalloc(sizeof(*args), GFP_KERNEL);
> + if (!args) {
> + status = -ENOMEM;
> + goto out;
> + }
> + res = kzalloc(sizeof(*res), GFP_KERNEL);
> + if (!res) {
> + kfree(args);
> + status = -ENOMEM;
> + goto out;
> + }
Hmm, why not allocate the two in one piece and possibly have a kmem_cache
for them? (A sketch of that idea follows the quoted patch below.)
Benny
> + args->args_op = dp;
> + msg.rpc_argp = args;
> + msg.rpc_resp = res;
> dp->dl_retries = 1;
> status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
> &nfsd4_cb_recall_ops, dp);
> +out:
> if (status) {
> put_nfs4_client(clp);
> nfs4_put_delegation(dp);
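A sketch of the single-allocation idea raised above, with a hypothetical wrapper struct (nfs4_cb_call) and calloc standing in for kzalloc or a dedicated kmem_cache:

#include <stdlib.h>

typedef unsigned int u32;    /* stand-in for the kernel typedef */
struct nfs4_client;

struct nfsd4_cb_sequence {
	u32 cbs_minorversion;
	struct nfs4_client *cbs_clp;
};

struct nfs4_rpc_args {
	void *args_op;
	struct nfsd4_cb_sequence args_seq;
};

struct nfs4_rpc_res {
	struct nfsd4_cb_sequence *res_seq;
};

/*
 * One object holding both halves, so a single allocation (or one slab
 * object) covers the whole callback message instead of two kzallocs.
 */
struct nfs4_cb_call {
	struct nfs4_rpc_args call_args;
	struct nfs4_rpc_res call_res;
};

static struct nfs4_cb_call *alloc_cb_call(void *op)
{
	struct nfs4_cb_call *call = calloc(1, sizeof(*call));

	if (!call)
		return NULL;
	call->call_args.args_op = op;
	call->call_res.res_seq = &call->call_args.args_seq;
	return call;
}

int main(void)
{
	struct nfs4_cb_call *call = alloc_cb_call(NULL);

	free(call);
	return 0;
}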
Ricardo, this patch conflicts with Andy's DRC changes.
I'm not sure who'll go first into Bruce's branch but
the other guy will need to rebase his patches accordingly...
Benny
On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]> wrote:
> Move common initialization of 'struct nfs4_client' inside create_client().
>
> Signed-off-by: Ricardo Labiaga <[email protected]>
>
> [nfsd41: Remember the auth flavor to use for callbacks]
> Signed-off-by: Ricardo Labiaga <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
> ---
> fs/nfsd/nfs4state.c | 90 +++++++++++++++++++++++++-------------------------
> 1 files changed, 45 insertions(+), 45 deletions(-)
>
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index 45cbbbc..1e4740f 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -710,27 +710,6 @@ expire_client(struct nfs4_client *clp)
> put_nfs4_client(clp);
> }
>
> -static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir)
> -{
> - struct nfs4_client *clp;
> -
> - clp = alloc_client(name);
> - if (clp == NULL)
> - return NULL;
> - memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
> - atomic_set(&clp->cl_count, 1);
> - atomic_set(&clp->cl_cb_conn.cb_set, 0);
> - INIT_LIST_HEAD(&clp->cl_idhash);
> - INIT_LIST_HEAD(&clp->cl_strhash);
> - INIT_LIST_HEAD(&clp->cl_openowners);
> - INIT_LIST_HEAD(&clp->cl_delegations);
> - INIT_LIST_HEAD(&clp->cl_sessions);
> - INIT_LIST_HEAD(&clp->cl_lru);
> - clear_bit(0, &clp->cl_cb_slot_busy);
> - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
> - return clp;
> -}
> -
> static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
> {
> memcpy(target->cl_verifier.data, source->data,
> @@ -793,6 +772,46 @@ static void gen_confirm(struct nfs4_client *clp)
> *p++ = i++;
> }
>
> +static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
> + struct svc_rqst *rqstp, nfs4_verifier *verf)
> +{
> + struct nfs4_client *clp;
> + u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
> + char *princ;
> +
> + clp = alloc_client(name);
> + if (clp == NULL)
> + return NULL;
> +
> + princ = svc_gss_principal(rqstp);
> + if (princ) {
> + clp->cl_principal = kstrdup(princ, GFP_KERNEL);
> + if (clp->cl_principal == NULL) {
> + free_client(clp);
> + return NULL;
> + }
> + }
> +
> + memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
> + atomic_set(&clp->cl_count, 1);
> + atomic_set(&clp->cl_cb_conn.cb_set, 0);
> + INIT_LIST_HEAD(&clp->cl_idhash);
> + INIT_LIST_HEAD(&clp->cl_strhash);
> + INIT_LIST_HEAD(&clp->cl_openowners);
> + INIT_LIST_HEAD(&clp->cl_delegations);
> + INIT_LIST_HEAD(&clp->cl_sessions);
> + INIT_LIST_HEAD(&clp->cl_lru);
> + clear_bit(0, &clp->cl_cb_slot_busy);
> + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
> + copy_verf(clp, verf);
> + clp->cl_addr = ip_addr;
> + clp->cl_flavor = rqstp->rq_flavor;
> + copy_cred(&clp->cl_cred, &rqstp->rq_cred);
> + gen_confirm(clp);
> +
> + return clp;
> +}
> +
> static int check_name(struct xdr_netobj name)
> {
> if (name.len == 0)
> @@ -1285,17 +1304,13 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
>
> out_new:
> /* Normal case */
> - new = create_client(exid->clname, dname);
> + new = create_client(exid->clname, dname, rqstp, &verf);
> if (new == NULL) {
> status = nfserr_resource;
> goto out;
> }
>
> - copy_verf(new, &verf);
> - copy_cred(&new->cl_cred, &rqstp->rq_cred);
> - new->cl_addr = ip_addr;
> gen_clid(new);
> - gen_confirm(new);
> add_to_unconfirmed(new, strhashval);
> out_copy:
> exid->clientid.cl_boot = new->cl_clientid.cl_boot;
> @@ -1535,7 +1550,6 @@ __be32
> nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> struct nfsd4_setclientid *setclid)
> {
> - struct sockaddr_in *sin = svc_addr_in(rqstp);
> struct xdr_netobj clname = {
> .len = setclid->se_namelen,
> .data = setclid->se_name,
> @@ -1544,7 +1558,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> unsigned int strhashval;
> struct nfs4_client *conf, *unconf, *new;
> __be32 status;
> - char *princ;
> char dname[HEXDIR_LEN];
>
> if (!check_name(clname))
> @@ -1586,7 +1599,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> */
> if (unconf)
> expire_client(unconf);
> - new = create_client(clname, dname);
> + new = create_client(clname, dname, rqstp, &clverifier);
> if (new == NULL)
> goto out;
> gen_clid(new);
> @@ -1603,7 +1616,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> */
> expire_client(unconf);
> }
> - new = create_client(clname, dname);
> + new = create_client(clname, dname, rqstp, &clverifier);
> if (new == NULL)
> goto out;
> copy_clid(new, conf);
> @@ -1613,7 +1626,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> * probable client reboot; state will be removed if
> * confirmed.
> */
> - new = create_client(clname, dname);
> + new = create_client(clname, dname, rqstp, &clverifier);
> if (new == NULL)
> goto out;
> gen_clid(new);
> @@ -1624,24 +1637,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
> * confirmed.
> */
> expire_client(unconf);
> - new = create_client(clname, dname);
> + new = create_client(clname, dname, rqstp, &clverifier);
> if (new == NULL)
> goto out;
> gen_clid(new);
> }
> - copy_verf(new, &clverifier);
> - new->cl_addr = sin->sin_addr.s_addr;
> - new->cl_flavor = rqstp->rq_flavor;
> - princ = svc_gss_principal(rqstp);
> - if (princ) {
> - new->cl_principal = kstrdup(princ, GFP_KERNEL);
> - if (new->cl_principal == NULL) {
> - free_client(new);
> - goto out;
> - }
> - }
> - copy_cred(&new->cl_cred, &rqstp->rq_cred);
> - gen_confirm(new);
> gen_callback(new, setclid);
> add_to_unconfirmed(new, strhashval);
> setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]> wrote:
> Signed-off-by: Rahul Iyer <[email protected]>
> Signed-off-by: Mike Sager <[email protected]>
> Signed-off-by: Marc Eshel <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
>
> When the call direction is a reply, copy the xid and call direction into the
> req->rq_private_buf.head[0].iov_base otherwise rpc_verify_header returns
> rpc_garbage.
>
> Signed-off-by: Andy Adamson <[email protected]>
> Signed-off-by: Benny Halevy <[email protected]>
> [get rid of CONFIG_NFSD_V4_1]
> Signed-off-by: Benny Halevy <[email protected]>
>
> [sunrpc: refactoring of svc_tcp_recvfrom]
> Signed-off-by: Alexandros Batsakis <[email protected]>
> Signed-off-by: Ricardo Labiaga <[email protected]>
> ---
> include/linux/sunrpc/clnt.h | 1 +
> include/linux/sunrpc/svcsock.h | 1 +
> include/linux/sunrpc/xprt.h | 2 +
> net/sunrpc/clnt.c | 1 +
> net/sunrpc/svcsock.c | 102 +++++++++++++--
> net/sunrpc/xprt.c | 41 ++++++-
> net/sunrpc/xprtsock.c | 278 +++++++++++++++++++++++++++++++++++++++-
> 7 files changed, 405 insertions(+), 21 deletions(-)
>
> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
> index c39a210..cf9a8ec 100644
> --- a/include/linux/sunrpc/clnt.h
> +++ b/include/linux/sunrpc/clnt.h
> @@ -110,6 +110,7 @@ struct rpc_create_args {
> rpc_authflavor_t authflavor;
> unsigned long flags;
> char *client_name;
> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
> };
>
> /* Values for "flags" field */
> diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
> index 8271631..19228f4 100644
> --- a/include/linux/sunrpc/svcsock.h
> +++ b/include/linux/sunrpc/svcsock.h
> @@ -28,6 +28,7 @@ struct svc_sock {
> /* private TCP part */
> u32 sk_reclen; /* length of record */
> u32 sk_tcplen; /* current read length */
> + struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */
> };
>
> /*
> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
> index 1758d9f..063a6a7 100644
> --- a/include/linux/sunrpc/xprt.h
> +++ b/include/linux/sunrpc/xprt.h
> @@ -174,6 +174,7 @@ struct rpc_xprt {
> spinlock_t reserve_lock; /* lock slot table */
> u32 xid; /* Next XID value to use */
> struct rpc_task * snd_task; /* Task blocked in send */
> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
> struct list_head recv;
>
> struct {
> @@ -197,6 +198,7 @@ struct xprt_create {
> struct sockaddr * srcaddr; /* optional local address */
> struct sockaddr * dstaddr; /* remote peer address */
> size_t addrlen;
> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
> };
>
> struct xprt_class {
> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
> index 5abab09..3dc847f 100644
> --- a/net/sunrpc/clnt.c
> +++ b/net/sunrpc/clnt.c
> @@ -266,6 +266,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
> .srcaddr = args->saddress,
> .dstaddr = args->address,
> .addrlen = args->addrsize,
> + .bc_sock = args->bc_sock,
> };
> char servername[48];
>
> diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
> index b739111..90c9a75 100644
> --- a/net/sunrpc/svcsock.c
> +++ b/net/sunrpc/svcsock.c
> @@ -49,6 +49,7 @@
> #include <linux/sunrpc/msg_prot.h>
> #include <linux/sunrpc/svcsock.h>
> #include <linux/sunrpc/stats.h>
> +#include <linux/sunrpc/xprt.h>
>
> #define RPCDBG_FACILITY RPCDBG_SVCXPRT
>
> @@ -895,6 +896,57 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
> return -EAGAIN;
> }
>
> +static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp,
> + struct rpc_rqst **reqpp, struct kvec *vec)
> +{
> + struct rpc_rqst *req = NULL;
> + u32 *p;
> + u32 xid;
> + u32 calldir;
> + int len;
> +
> + len = svc_recvfrom(rqstp, vec, 1, 8);
> + if (len < 0)
> + goto error;
> +
> + p = (u32 *)rqstp->rq_arg.head[0].iov_base;
> + xid = *p++;
> + calldir = *p;
> +
> + if (calldir == 0) {
> + /* REQUEST is the most common case */
> + vec[0] = rqstp->rq_arg.head[0];
> + } else {
> + /* REPLY */
> + if (svsk->sk_bc_xprt)
> + req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
> +
> + if (!req) {
> + printk(KERN_NOTICE
> + "%s: Got unrecognized reply: "
> + "calldir 0x%x sk_bc_xprt %p xid %08x\n",
> + __func__, ntohl(calldir),
> + svsk->sk_bc_xprt, xid);
> + vec[0] = rqstp->rq_arg.head[0];
> + goto out;
> + }
> +
> + memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
> + sizeof(struct xdr_buf));
> + /* copy the xid and call direction */
> + memcpy(req->rq_private_buf.head[0].iov_base,
> + rqstp->rq_arg.head[0].iov_base, 8);
> + vec[0] = req->rq_private_buf.head[0];
> + }
> + out:
> + vec[0].iov_base += 8;
> + vec[0].iov_len -= 8;
> + len = svsk->sk_reclen - 8;
> + error:
> + *reqpp = req;
> + return len;
> +}
> +
> /*
> * Receive data from a TCP socket.
> */
> @@ -906,6 +958,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
> int len;
> struct kvec *vec;
> int pnum, vlen;
> + struct rpc_rqst *req = NULL;
>
> dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
> svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
> @@ -919,9 +972,27 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
> vec = rqstp->rq_vec;
> vec[0] = rqstp->rq_arg.head[0];
> vlen = PAGE_SIZE;
> +
> + /*
> + * We have enough data for the whole tcp record. Let's try and read the
> + * first 8 bytes to get the xid and the call direction. We can use this
> + * to figure out if this is a call or a reply to a callback. If
> + * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
> + * In that case, don't bother with the calldir and just read the data.
> + * It will be rejected in svc_process.
> + */
> + if (len >= 8) {
> + len = svc_process_calldir(svsk, rqstp, &req, vec);
> + if (len < 0)
> + goto err_again;
> + vlen -= 8;
> + }
> +
> pnum = 1;
> while (vlen < len) {
> - vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
> + vec[pnum].iov_base = (req) ?
> + page_address(req->rq_private_buf.pages[pnum - 1]) :
> + page_address(rqstp->rq_pages[pnum]);
> vec[pnum].iov_len = PAGE_SIZE;
> pnum++;
> vlen += PAGE_SIZE;
> @@ -931,8 +1002,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
> /* Now receive data */
> len = svc_recvfrom(rqstp, vec, pnum, len);
> if (len < 0)
> - goto error;
> + goto err_again;
This seems to belong to the previous patch,
as does the last hunk in this file (@@ -957,21 +1039,19 @@).
Benny
> +
> + /*
> + * Account for the 8 bytes we read earlier
> + */
> + len += 8;
>
> + if (req) {
> + xprt_complete_rqst(req->rq_task, len);
> + len = 0;
> + goto out;
> + }
> dprintk("svc: TCP complete record (%d bytes)\n", len);
> rqstp->rq_arg.len = len;
> rqstp->rq_arg.page_base = 0;
> @@ -946,6 +1027,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
> rqstp->rq_xprt_ctxt = NULL;
> rqstp->rq_prot = IPPROTO_TCP;
>
> +out:
> /* Reset TCP read info */
> svsk->sk_reclen = 0;
> svsk->sk_tcplen = 0;
> @@ -957,21 +1039,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>
> return len;
>
> - err_delete:
> - set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
> - return -EAGAIN;
> -
> - error:
> +err_again:
> if (len == -EAGAIN) {
> dprintk("RPC: TCP recvfrom got EAGAIN\n");
> svc_xprt_received(&svsk->sk_xprt);
> - } else {
> + return len;
> + }
> +error:
> + if (len != -EAGAIN) {
> printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
> svsk->sk_xprt.xpt_server->sv_name, -len);
> - goto err_delete;
> + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
> }
> -
> - return len;
> + return -EAGAIN;
> }
>
> /*
> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
> index a0bfe53..03f175e 100644
> --- a/net/sunrpc/xprt.c
> +++ b/net/sunrpc/xprt.c
> @@ -1015,6 +1015,27 @@ void xprt_release(struct rpc_task *task)
> spin_unlock(&xprt->reserve_lock);
> }
>
> +/*
> + * The autoclose function for the back channel
> + *
> + * The callback channel should never close the channel,
> + * let the forechannel do that.
> + */
> +static void bc_autoclose(struct work_struct *work)
> +{
> + return;
> +}
> +
> +
> +/*
> + * The autodisconnect routine for the back channel. We never disconnect
> + */
> +static void
> +bc_init_autodisconnect(unsigned long data)
> +{
> + return;
> +}
> +
> /**
> * xprt_create_transport - create an RPC transport
> * @args: rpc transport creation arguments
> @@ -1051,9 +1072,16 @@ found:
>
> INIT_LIST_HEAD(&xprt->free);
> INIT_LIST_HEAD(&xprt->recv);
> - INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
> - setup_timer(&xprt->timer, xprt_init_autodisconnect,
> - (unsigned long)xprt);
> + if (args->bc_sock) {
> + INIT_WORK(&xprt->task_cleanup, bc_autoclose);
> + setup_timer(&xprt->timer, bc_init_autodisconnect,
> + (unsigned long)xprt);
> + } else {
> + INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
> + setup_timer(&xprt->timer, xprt_init_autodisconnect,
> + (unsigned long)xprt);
> + }
> +
> xprt->last_used = jiffies;
> xprt->cwnd = RPC_INITCWND;
> xprt->bind_index = 0;
> @@ -1073,6 +1101,13 @@ found:
> dprintk("RPC: created transport %p with %u slots\n", xprt,
> xprt->max_reqs);
>
> + /*
> + * Since we don't want connections for the backchannel, we set
> + * the xprt status to connected
> + */
> + if (args->bc_sock)
> + xprt_set_connected(xprt);
> +
> return xprt;
> }
>
> diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
> index d40ff50..067d205 100644
> --- a/net/sunrpc/xprtsock.c
> +++ b/net/sunrpc/xprtsock.c
> @@ -32,6 +32,7 @@
> #include <linux/tcp.h>
> #include <linux/sunrpc/clnt.h>
> #include <linux/sunrpc/sched.h>
> +#include <linux/sunrpc/svcsock.h>
> #include <linux/sunrpc/xprtsock.h>
> #include <linux/file.h>
>
> @@ -1966,6 +1967,219 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
> xprt->stat.bklog_u);
> }
>
> +/*
> + * The connect worker for the backchannel
> + * This should never be called as we should never need to connect
> + */
> +static void bc_connect_worker(struct work_struct *work)
> +{
> + BUG();
> +}
> +
> +/*
> + * The set_port routine of the rpc_xprt_ops. This is related to the portmapper
> + * and should never be called
> + */
> +
> +static void bc_set_port(struct rpc_xprt *xprt, unsigned short port)
> +{
> + BUG();
> +}
> +
> +/*
> + * The connect routine for the backchannel rpc_xprt ops
> + * Again, should never be called!
> + */
> +
> +static void bc_connect(struct rpc_task *task)
> +{
> + BUG();
> +}
> +
> +struct rpc_buffer {
> + size_t len;
> + char data[];
> +};
> +/*
> + * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
> + * we allocate pages instead of doing a kmalloc like rpc_malloc is because we want
> + * to use the server side send routines.
> + */
> +void *bc_malloc(struct rpc_task *task, size_t size)
> +{
> + struct page *page;
> + struct rpc_buffer *buf;
> +
> + BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
> + page = alloc_page(GFP_KERNEL);
> +
> + if (!page)
> + return NULL;
> +
> + buf = page_address(page);
> + buf->len = PAGE_SIZE;
> +
> + return buf->data;
> +}
> +
> +/*
> + * Free the space allocated in the bc_alloc routine
> + */
> +void bc_free(void *buffer)
> +{
> + struct rpc_buffer *buf;
> +
> + if (!buffer)
> + return;
> +
> + buf = container_of(buffer, struct rpc_buffer, data);
> + free_pages((unsigned long)buf, get_order(buf->len));
> +}
> +
> +/*
> + * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
> + * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
> + */
> +static int bc_sendto(struct rpc_rqst *req)
> +{
> + int total_len;
> + int len;
> + int size;
> + int result;
> + struct xdr_buf *xbufp = &req->rq_snd_buf;
> + struct page **pages = xbufp->pages;
> + unsigned int flags = MSG_MORE;
> + unsigned int pglen = xbufp->page_len;
> + size_t base = xbufp->page_base;
> + struct rpc_xprt *xprt = req->rq_xprt;
> + struct sock_xprt *transport =
> + container_of(xprt, struct sock_xprt, xprt);
> + struct socket *sock = transport->sock;
> +
> + total_len = xbufp->len;
> +
> + /*
> + * Set up the rpc header and record marker stuff
> + */
> + xs_encode_tcp_record_marker(xbufp);
> +
> + /*
> + * The RPC message is divided into 3 pieces:
> + * - The header: This is what most of the smaller RPC messages consist
> + * of. Often the whole message is in this.
> + *
> + * - xdr->pages: This is a list of pages that contain data, for
> + * example in a write request or while using rpcsec gss
> + *
> + * - The tail: This is the rest of the rpc message
> + *
> + * First we send the header, then the pages and then finally the tail.
> + * The code borrows heavily from svc_sendto.
> + */
> +
> + /*
> + * Send the head
> + */
> + if (total_len == xbufp->head[0].iov_len)
> + flags = 0;
> +
> + len = sock->ops->sendpage(sock, virt_to_page(xbufp->head[0].iov_base),
> + (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK,
> + xbufp->head[0].iov_len, flags);
> +
> + if (len != xbufp->head[0].iov_len)
> + goto out;
> +
> + /*
> + * send page data
> + *
> + * Check the amount of data to be sent. If it is less than the
> + * remaining page, then send it else send the current page
> + */
> +
> + size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
> + while (pglen > 0) {
> + if (total_len == size)
> + flags = 0;
> + result = sock->ops->sendpage(sock, *pages, base, size, flags);
> + if (result > 0)
> + len += result;
> + if (result != size)
> + goto out;
> + total_len -= size;
> + pglen -= size;
> + size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
> + base = 0;
> + pages++;
> + }
> + /*
> + * send tail
> + */
> + if (xbufp->tail[0].iov_len) {
> + result = sock->ops->sendpage(sock,
> + xbufp->tail[0].iov_base,
> + (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK,
> + xbufp->tail[0].iov_len,
> + 0);
> +
> + if (result > 0)
> + len += result;
> + }
> +out:
> + if (len != xbufp->len)
> + printk(KERN_NOTICE "Error sending entire callback!\n");
> +
> + return len;
> +}
> +
> +/*
> + * The send routine. Borrows from svc_send
> + */
> +static int bc_send_request(struct rpc_task *task)
> +{
> + struct rpc_rqst *req = task->tk_rqstp;
> + struct rpc_xprt *bc_xprt = req->rq_xprt;
> + struct svc_xprt *xprt;
> + struct svc_sock *svsk;
> + u32 len;
> +
> + dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
> + /*
> + * Get the server socket associated with this callback xprt
> + */
> + svsk = bc_xprt->bc_sock;
> + xprt = &svsk->sk_xprt;
> +
> + mutex_lock(&xprt->xpt_mutex);
> + if (test_bit(XPT_DEAD, &xprt->xpt_flags))
> + len = -ENOTCONN;
> + else
> + len = bc_sendto(req);
> + mutex_unlock(&xprt->xpt_mutex);
> +
> + return 0;
> +
> +}
> +
> +/*
> + * The close routine. Since this is client initiated, we do nothing
> + */
> +
> +static void bc_close(struct rpc_xprt *xprt)
> +{
> + return;
> +}
> +
> +/*
> + * The xprt destroy routine. Again, because this connection is client
> + * initiated, we do nothing
> + */
> +
> +static void bc_destroy(struct rpc_xprt *xprt)
> +{
> + return;
> +}
> +
> static struct rpc_xprt_ops xs_udp_ops = {
> .set_buffer_size = xs_udp_set_buffer_size,
> .reserve_xprt = xprt_reserve_xprt_cong,
> @@ -1999,6 +2213,24 @@ static struct rpc_xprt_ops xs_tcp_ops = {
> .print_stats = xs_tcp_print_stats,
> };
>
> +/*
> + * The rpc_xprt_ops for the server backchannel
> + */
> +
> +static struct rpc_xprt_ops bc_tcp_ops = {
> + .reserve_xprt = xprt_reserve_xprt,
> + .release_xprt = xprt_release_xprt,
> + .set_port = bc_set_port,
> + .connect = bc_connect,
> + .buf_alloc = bc_malloc,
> + .buf_free = bc_free,
> + .send_request = bc_send_request,
> + .set_retrans_timeout = xprt_set_retrans_timeout_def,
> + .close = bc_close,
> + .destroy = bc_destroy,
> + .print_stats = xs_tcp_print_stats,
> +};
> +
> static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
> unsigned int slot_table_size)
> {
> @@ -2131,13 +2363,29 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
> xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
> xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
>
> - xprt->bind_timeout = XS_BIND_TO;
> - xprt->connect_timeout = XS_TCP_CONN_TO;
> - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
> - xprt->idle_timeout = XS_IDLE_DISC_TO;
> + if (args->bc_sock) {
> + /* backchannel */
> + xprt_set_bound(xprt);
> + INIT_DELAYED_WORK(&transport->connect_worker,
> + bc_connect_worker);
> + xprt->bind_timeout = 0;
> + xprt->connect_timeout = 0;
> + xprt->reestablish_timeout = 0;
> + xprt->idle_timeout = (~0);
>
> - xprt->ops = &xs_tcp_ops;
> - xprt->timeout = &xs_tcp_default_timeout;
> + /*
> + * The backchannel uses the same socket connection as the
> + * forechannel
> + */
> + xprt->bc_sock = args->bc_sock;
> + xprt->bc_sock->sk_bc_xprt = xprt;
> + transport->sock = xprt->bc_sock->sk_sock;
> + transport->inet = xprt->bc_sock->sk_sk;
> +
> + xprt->ops = &bc_tcp_ops;
> +
> + goto next;
> + }
>
> switch (addr->sa_family) {
> case AF_INET:
> @@ -2145,13 +2393,29 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
> xprt_set_bound(xprt);
>
> INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4);
> - xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
> break;
> case AF_INET6:
> if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
> xprt_set_bound(xprt);
>
> INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6);
> + break;
> + }
> + xprt->bind_timeout = XS_BIND_TO;
> + xprt->connect_timeout = XS_TCP_CONN_TO;
> + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
> + xprt->idle_timeout = XS_IDLE_DISC_TO;
> +
> + xprt->ops = &xs_tcp_ops;
> +
> +next:
> + xprt->timeout = &xs_tcp_default_timeout;
> +
> + switch (addr->sa_family) {
> + case AF_INET:
> + xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
> + break;
> + case AF_INET6:
> xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
> break;
> default:
On 5/20/09 12:32 AM, "Benny Halevy" <[email protected]> wrote:
> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
> wrote:
>> Follow the model we use in the client. Make the sequence arguments
>> part of the regular RPC arguments. The results point to them. Adjust
>> references to the sequence arguments.
>>
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> [define struct nfsd4_cb_sequence here]
>> Signed-off-by: Benny Halevy <[email protected]>
>> ---
>> fs/nfsd/nfs4callback.c | 9 +++++++++
>> include/linux/nfsd/state.h | 9 +++++++++
>> 2 files changed, 18 insertions(+), 0 deletions(-)
>>
>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>> index f75750e..0b5c4a6 100644
>> --- a/fs/nfsd/nfs4callback.c
>> +++ b/fs/nfsd/nfs4callback.c
>> @@ -92,6 +92,15 @@ enum nfs_cb_opnum4 {
>> cb_sequence_dec_sz + \
>> op_dec_sz)
>>
>> +struct nfs4_rpc_args {
>> + void *args_op;
>> + struct nfsd4_cb_sequence args_seq;
>> +};
>> +
>> +struct nfs4_rpc_res {
>> + struct nfsd4_cb_sequence *res_seq;
>
> Given the size of nfsd4_cb_sequence, why not just embed it here?
nfs4_rpc_res->res_seq will simply point to nfs4_rpc_args.args_seq. During
decode we only need to verify that the returned values are the same as what
we sent, so I don't see the need for a separate copy. That's why the args
hold the actual structure while the results hold only a pointer.
- ricardo
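A minimal sketch of the relationship being described (the helper name is
hypothetical and not part of the posted series; the struct fields follow
the patch quoted above):

static void nfs4_cb_wire_up_res(struct nfs4_rpc_args *args,
				struct nfs4_rpc_res *res)
{
	/* The result carries no data of its own: it points back at the
	 * sequence embedded in the arguments, so decode can compare the
	 * server's reply against what was sent without a second copy. */
	res->res_seq = &args->args_seq;
}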
> (Historically, it didn't have the cbs_minorversion member so
> its presence was the actual indication of the minorversion.
> Now that this has changed, I see no good reason not to embed it.)
>
> Benny
>
>> +};
>> +
>> /*
>> * Generic encode routines from fs/nfs/nfs4xdr.c
>> */
>> diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
>> index 212a75b..931aaa6 100644
>> --- a/include/linux/nfsd/state.h
>> +++ b/include/linux/nfsd/state.h
>> @@ -61,6 +61,12 @@ typedef struct {
>> #define si_stateownerid si_opaque.so_stateownerid
>> #define si_fileid si_opaque.so_fileid
>>
>> +struct nfsd4_cb_sequence {
>> + /* args/res */
>> + u32 cbs_minorversion;
>> + struct nfs4_client *cbs_clp;
>> +};
>> +
>> struct nfs4_delegation {
>> struct list_head dl_perfile;
>> struct list_head dl_perclnt;
>> @@ -188,6 +194,9 @@ struct nfs4_client {
>> struct nfsd4_slot cl_slot; /* create_session slot */
>> u32 cl_exchange_flags;
>> struct nfs4_sessionid cl_sessionid;
>> +
>> + /* for nfs41 callbacks */
>> + struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
>> };
>>
>> /* struct nfs4_client_reset
On 5/20/09 12:46 AM, "Benny Halevy" <[email protected]> wrote:
> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
> wrote:
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> [nfsd41: cb_recall callback]
>> [Share v4.0 and v4.1 back channel xdr]
>> Signed-off-by: Andy Adamson <[email protected]>
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>> [Share v4.0 and v4.1 back channel xdr]
>> Signed-off-by: Andy Adamson <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>> [nfsd41: use nfsd4_cb_sequence for callback minorversion]
>> [nfsd41: conditionally decode_sequence in nfs4_xdr_dec_cb_recall]
>> Signed-off-by: Benny Halevy <[email protected]>
>> [nfsd41: Backchannel: Add sequence arguments to callback RPC arguments]
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>> ---
>> fs/nfsd/nfs4callback.c | 40 ++++++++++++++++++++++++++++++++++++----
>> 1 files changed, 36 insertions(+), 4 deletions(-)
>>
>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>> index 521d5f5..b25dcc2 100644
>> --- a/fs/nfsd/nfs4callback.c
>> +++ b/fs/nfsd/nfs4callback.c
>> @@ -292,15 +292,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
>> }
>>
>> static int
>> -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct
>> nfs4_delegation *args)
>> +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
>> + struct nfs4_rpc_args *rpc_args)
>> {
>> struct xdr_stream xdr;
>> + struct nfs4_delegation *args = rpc_args->args_op;
>> struct nfs4_cb_compound_hdr hdr = {
>> .ident = args->dl_ident,
>> + .minorversion = rpc_args->args_seq.cbs_minorversion,
>> };
>>
>> xdr_init_encode(&xdr, &req->rq_snd_buf, p);
>> encode_cb_compound_hdr(&xdr, &hdr);
>> + encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
>> encode_cb_recall(&xdr, args, &hdr);
>> encode_cb_nops(&hdr);
>> return 0;
>> @@ -400,7 +404,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
>> }
>>
>> static int
>> -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
>> +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
>> + struct nfs4_rpc_res *rpc_res)
>> {
>> struct xdr_stream xdr;
>> struct nfs4_cb_compound_hdr hdr;
>> @@ -410,6 +415,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32
>> *p)
>> status = decode_cb_compound_hdr(&xdr, &hdr);
>> if (status)
>> goto out;
>> + if (rpc_res && rpc_res->res_seq) {
>
> With this version rpc_res != NULL is guaranteed, isn't it?
> Also, embedding res_seq in nfs4_rpc_res will obviate this condition further.
True, rpc_res will always be non-NULL but rpc_res->res_seq is still NULL if
this is a v4.0 callback.
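For illustration, the call site (not shown in this hunk) would only wire up
res_seq for a v4.1 callback, which is what drives the conditional decode
above; a sketch, assuming the cb_minorversion field from the state patches:

	args->args_seq.cbs_minorversion = clp->cl_cb_conn.cb_minorversion;
	if (args->args_seq.cbs_minorversion)
		res->res_seq = &args->args_seq;	/* 4.1: decode CB_SEQUENCE */
	/* 4.0: res->res_seq stays NULL and the sequence decode is skipped */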
>
>> + status = decode_cb_sequence(&xdr, rpc_res->res_seq, rqstp);
>> + if (status)
>> + goto out;
>> + }
>> status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
>> out:
>> return status;
>> @@ -687,6 +697,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task,
>> void *calldata)
>> struct nfs4_delegation *dp = calldata;
>> struct nfs4_client *clp = dp->dl_client;
>>
>> + nfsd4_cb_done(task, calldata);
>> +
>> switch (task->tk_status) {
>> case -EIO:
>> /* Network partition? */
>> @@ -699,16 +711,20 @@ static void nfsd4_cb_recall_done(struct rpc_task *task,
>> void *calldata)
>> break;
>> default:
>> /* success, or error we can't handle */
>> - return;
>> + goto done;
>> }
>> if (dp->dl_retries--) {
>> rpc_delay(task, 2*HZ);
>> task->tk_status = 0;
>> rpc_restart_call(task);
>> + return;
>> } else {
>> atomic_set(&clp->cl_cb_conn.cb_set, 0);
>> warn_no_callback_path(clp, task->tk_status);
>> }
>> +done:
>> + kfree(task->tk_msg.rpc_argp);
>> + kfree(task->tk_msg.rpc_resp);
>> }
>>
>> static void nfsd4_cb_recall_release(void *calldata)
>> @@ -734,16 +750,32 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
>> {
>> struct nfs4_client *clp = dp->dl_client;
>> struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
>> + struct nfs4_rpc_args *args;
>> + struct nfs4_rpc_res *res;
>> struct rpc_message msg = {
>> .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
>> - .rpc_argp = dp,
>> .rpc_cred = clp->cl_cb_conn.cb_cred
>> };
>> int status;
>>
>> + args = kzalloc(sizeof(*args), GFP_KERNEL);
>> + if (!args) {
>> + status = -ENOMEM;
>> + goto out;
>> + }
>> + res = kzalloc(sizeof(*res), GFP_KERNEL);
>> + if (!res) {
>> + kfree(args);
>> + status = -ENOMEM;
>> + goto out;
>> + }
>
> Hmm, why not allocate the two in one piece, and possibly have a kmem_cache
> for them?
They're two different types of structures. You mean encapsulate them in a
super structure and then have pointers to the respective members? I'm not
following.
- ricardo
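For readers following the exchange: one way to read the suggestion, which
Benny spells out further down the thread, is a single allocation wrapping
both structures. A sketch with a hypothetical wrapper name:

struct nfs4_rpc_alloc {
	struct nfs4_rpc_args args;
	struct nfs4_rpc_res res;
};

	/* one kzalloc (or kmem_cache_zalloc) instead of two */
	struct nfs4_rpc_alloc *ra = kzalloc(sizeof(*ra), GFP_KERNEL);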
> Benny
>
>> + args->args_op = dp;
>> + msg.rpc_argp = args;
>> + msg.rpc_resp = res;
>> dp->dl_retries = 1;
>> status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
>> &nfsd4_cb_recall_ops, dp);
>> +out:
>> if (status) {
>> put_nfs4_client(clp);
>> nfs4_put_delegation(dp);
On 5/20/09 1:18 AM, "Benny Halevy" <[email protected]> wrote:
> Ricardo, this patch conflicts with Andy's DRC changes.
> I'm not sure who'll go first into Bruce's branch but
> the other guy will need to rebase his patches accordingly...
Sure.
- ricardo
>
> Benny
>
> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
> wrote:
>> Move common initialization of 'struct nfs4_client' inside create_client().
>>
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>
>> [nfsd41: Remember the auth flavor to use for callbacks]
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>> ---
>> fs/nfsd/nfs4state.c | 90
>> +++++++++++++++++++++++++-------------------------
>> 1 files changed, 45 insertions(+), 45 deletions(-)
>>
>> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
>> index 45cbbbc..1e4740f 100644
>> --- a/fs/nfsd/nfs4state.c
>> +++ b/fs/nfsd/nfs4state.c
>> @@ -710,27 +710,6 @@ expire_client(struct nfs4_client *clp)
>> put_nfs4_client(clp);
>> }
>>
>> -static struct nfs4_client *create_client(struct xdr_netobj name, char
>> *recdir)
>> -{
>> - struct nfs4_client *clp;
>> -
>> - clp = alloc_client(name);
>> - if (clp == NULL)
>> - return NULL;
>> - memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
>> - atomic_set(&clp->cl_count, 1);
>> - atomic_set(&clp->cl_cb_conn.cb_set, 0);
>> - INIT_LIST_HEAD(&clp->cl_idhash);
>> - INIT_LIST_HEAD(&clp->cl_strhash);
>> - INIT_LIST_HEAD(&clp->cl_openowners);
>> - INIT_LIST_HEAD(&clp->cl_delegations);
>> - INIT_LIST_HEAD(&clp->cl_sessions);
>> - INIT_LIST_HEAD(&clp->cl_lru);
>> - clear_bit(0, &clp->cl_cb_slot_busy);
>> - rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
>> - return clp;
>> -}
>> -
>> static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
>> {
>> memcpy(target->cl_verifier.data, source->data,
>> @@ -793,6 +772,46 @@ static void gen_confirm(struct nfs4_client *clp)
>> *p++ = i++;
>> }
>>
>> +static struct nfs4_client *create_client(struct xdr_netobj name, char
>> *recdir,
>> + struct svc_rqst *rqstp, nfs4_verifier *verf)
>> +{
>> + struct nfs4_client *clp;
>> + u32 ip_addr = svc_addr_in(rqstp)->sin_addr.s_addr;
>> + char *princ;
>> +
>> + clp = alloc_client(name);
>> + if (clp == NULL)
>> + return NULL;
>> +
>> + princ = svc_gss_principal(rqstp);
>> + if (princ) {
>> + clp->cl_principal = kstrdup(princ, GFP_KERNEL);
>> + if (clp->cl_principal == NULL) {
>> + free_client(clp);
>> + return NULL;
>> + }
>> + }
>> +
>> + memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
>> + atomic_set(&clp->cl_count, 1);
>> + atomic_set(&clp->cl_cb_conn.cb_set, 0);
>> + INIT_LIST_HEAD(&clp->cl_idhash);
>> + INIT_LIST_HEAD(&clp->cl_strhash);
>> + INIT_LIST_HEAD(&clp->cl_openowners);
>> + INIT_LIST_HEAD(&clp->cl_delegations);
>> + INIT_LIST_HEAD(&clp->cl_sessions);
>> + INIT_LIST_HEAD(&clp->cl_lru);
>> + clear_bit(0, &clp->cl_cb_slot_busy);
>> + rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
>> + copy_verf(clp, verf);
>> + clp->cl_addr = ip_addr;
>> + clp->cl_flavor = rqstp->rq_flavor;
>> + copy_cred(&clp->cl_cred, &rqstp->rq_cred);
>> + gen_confirm(clp);
>> +
>> + return clp;
>> +}
>> +
>> static int check_name(struct xdr_netobj name)
>> {
>> if (name.len == 0)
>> @@ -1285,17 +1304,13 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
>>
>> out_new:
>> /* Normal case */
>> - new = create_client(exid->clname, dname);
>> + new = create_client(exid->clname, dname, rqstp, &verf);
>> if (new == NULL) {
>> status = nfserr_resource;
>> goto out;
>> }
>>
>> - copy_verf(new, &verf);
>> - copy_cred(&new->cl_cred, &rqstp->rq_cred);
>> - new->cl_addr = ip_addr;
>> gen_clid(new);
>> - gen_confirm(new);
>> add_to_unconfirmed(new, strhashval);
>> out_copy:
>> exid->clientid.cl_boot = new->cl_clientid.cl_boot;
>> @@ -1535,7 +1550,6 @@ __be32
>> nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state
>> *cstate,
>> struct nfsd4_setclientid *setclid)
>> {
>> - struct sockaddr_in *sin = svc_addr_in(rqstp);
>> struct xdr_netobj clname = {
>> .len = setclid->se_namelen,
>> .data = setclid->se_name,
>> @@ -1544,7 +1558,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct
>> nfsd4_compound_state *cstate,
>> unsigned int strhashval;
>> struct nfs4_client *conf, *unconf, *new;
>> __be32 status;
>> - char *princ;
>> char dname[HEXDIR_LEN];
>>
>> if (!check_name(clname))
>> @@ -1586,7 +1599,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct
>> nfsd4_compound_state *cstate,
>> */
>> if (unconf)
>> expire_client(unconf);
>> - new = create_client(clname, dname);
>> + new = create_client(clname, dname, rqstp, &clverifier);
>> if (new == NULL)
>> goto out;
>> gen_clid(new);
>> @@ -1603,7 +1616,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct
>> nfsd4_compound_state *cstate,
>> */
>> expire_client(unconf);
>> }
>> - new = create_client(clname, dname);
>> + new = create_client(clname, dname, rqstp, &clverifier);
>> if (new == NULL)
>> goto out;
>> copy_clid(new, conf);
>> @@ -1613,7 +1626,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct
>> nfsd4_compound_state *cstate,
>> * probable client reboot; state will be removed if
>> * confirmed.
>> */
>> - new = create_client(clname, dname);
>> + new = create_client(clname, dname, rqstp, &clverifier);
>> if (new == NULL)
>> goto out;
>> gen_clid(new);
>> @@ -1624,24 +1637,11 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct
>> nfsd4_compound_state *cstate,
>> * confirmed.
>> */
>> expire_client(unconf);
>> - new = create_client(clname, dname);
>> + new = create_client(clname, dname, rqstp, &clverifier);
>> if (new == NULL)
>> goto out;
>> gen_clid(new);
>> }
>> - copy_verf(new, &clverifier);
>> - new->cl_addr = sin->sin_addr.s_addr;
>> - new->cl_flavor = rqstp->rq_flavor;
>> - princ = svc_gss_principal(rqstp);
>> - if (princ) {
>> - new->cl_principal = kstrdup(princ, GFP_KERNEL);
>> - if (new->cl_principal == NULL) {
>> - free_client(new);
>> - goto out;
>> - }
>> - }
>> - copy_cred(&new->cl_cred, &rqstp->rq_cred);
>> - gen_confirm(new);
>> gen_callback(new, setclid);
>> add_to_unconfirmed(new, strhashval);
>> setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
On 5/20/09 1:34 AM, "Benny Halevy" <[email protected]> wrote:
> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
> wrote:
>> Signed-off-by: Rahul Iyer <[email protected]>
>> Signed-off-by: Mike Sager <[email protected]>
>> Signed-off-by: Marc Eshel <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>>
>> When the call direction is a reply, copy the xid and call direction into the
>> req->rq_private_buf.head[0].iov_base otherwise rpc_verify_header returns
>> rpc_garbage.
>>
>> Signed-off-by: Andy Adamson <[email protected]>
>> Signed-off-by: Benny Halevy <[email protected]>
>> [get rid of CONFIG_NFSD_V4_1]
>> Signed-off-by: Benny Halevy <[email protected]>
>>
>> [sunrpc: refactoring of svc_tcp_recvfrom]
>> Signed-off-by: Alexandros Batsakis <[email protected]>
>> Signed-off-by: Ricardo Labiaga <[email protected]>
>> ---
>> include/linux/sunrpc/clnt.h | 1 +
>> include/linux/sunrpc/svcsock.h | 1 +
>> include/linux/sunrpc/xprt.h | 2 +
>> net/sunrpc/clnt.c | 1 +
>> net/sunrpc/svcsock.c | 102 +++++++++++++--
>> net/sunrpc/xprt.c | 41 ++++++-
>> net/sunrpc/xprtsock.c | 278
>> +++++++++++++++++++++++++++++++++++++++-
>> 7 files changed, 405 insertions(+), 21 deletions(-)
>>
>> diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
>> index c39a210..cf9a8ec 100644
>> --- a/include/linux/sunrpc/clnt.h
>> +++ b/include/linux/sunrpc/clnt.h
>> @@ -110,6 +110,7 @@ struct rpc_create_args {
>> rpc_authflavor_t authflavor;
>> unsigned long flags;
>> char *client_name;
>> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
>> };
>>
>> /* Values for "flags" field */
>> diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
>> index 8271631..19228f4 100644
>> --- a/include/linux/sunrpc/svcsock.h
>> +++ b/include/linux/sunrpc/svcsock.h
>> @@ -28,6 +28,7 @@ struct svc_sock {
>> /* private TCP part */
>> u32 sk_reclen; /* length of record */
>> u32 sk_tcplen; /* current read length */
>> + struct rpc_xprt *sk_bc_xprt; /* NFSv4.1 backchannel xprt */
>> };
>>
>> /*
>> diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
>> index 1758d9f..063a6a7 100644
>> --- a/include/linux/sunrpc/xprt.h
>> +++ b/include/linux/sunrpc/xprt.h
>> @@ -174,6 +174,7 @@ struct rpc_xprt {
>> spinlock_t reserve_lock; /* lock slot table */
>> u32 xid; /* Next XID value to use */
>> struct rpc_task * snd_task; /* Task blocked in send */
>> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
>> struct list_head recv;
>>
>> struct {
>> @@ -197,6 +198,7 @@ struct xprt_create {
>> struct sockaddr * srcaddr; /* optional local address */
>> struct sockaddr * dstaddr; /* remote peer address */
>> size_t addrlen;
>> + struct svc_sock *bc_sock; /* NFSv4.1 backchannel */
>> };
>>
>> struct xprt_class {
>> diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
>> index 5abab09..3dc847f 100644
>> --- a/net/sunrpc/clnt.c
>> +++ b/net/sunrpc/clnt.c
>> @@ -266,6 +266,7 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
>> .srcaddr = args->saddress,
>> .dstaddr = args->address,
>> .addrlen = args->addrsize,
>> + .bc_sock = args->bc_sock,
>> };
>> char servername[48];
>>
>> diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
>> index b739111..90c9a75 100644
>> --- a/net/sunrpc/svcsock.c
>> +++ b/net/sunrpc/svcsock.c
>> @@ -49,6 +49,7 @@
>> #include <linux/sunrpc/msg_prot.h>
>> #include <linux/sunrpc/svcsock.h>
>> #include <linux/sunrpc/stats.h>
>> +#include <linux/sunrpc/xprt.h>
>>
>> #define RPCDBG_FACILITY RPCDBG_SVCXPRT
>>
>> @@ -895,6 +896,57 @@ static int svc_tcp_recv_record(struct svc_sock *svsk,
>> struct svc_rqst *rqstp)
>> return -EAGAIN;
>> }
>>
>> +static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst
>> *rqstp,
>> + struct rpc_rqst **reqpp, struct kvec *vec)
>> +{
>> + struct rpc_rqst *req = NULL;
>> + u32 *p;
>> + u32 xid;
>> + u32 calldir;
>> + int len;
>> +
>> + len = svc_recvfrom(rqstp, vec, 1, 8);
>> + if (len < 0)
>> + goto error;
>> +
>> + p = (u32 *)rqstp->rq_arg.head[0].iov_base;
>> + xid = *p++;
>> + calldir = *p;
>> +
>> + if (calldir == 0) {
>> + /* REQUEST is the most common case */
>> + vec[0] = rqstp->rq_arg.head[0];
>> + } else {
>> + /* REPLY */
>> + if (svsk->sk_bc_xprt)
>> + req = xprt_lookup_rqst(svsk->sk_bc_xprt, xid);
>> +
>> + if (!req) {
>> + printk(KERN_NOTICE
>> + "%s: Got unrecognized reply: "
>> + "calldir 0x%x sk_bc_xprt %p xid %08x\n",
>> + __func__, ntohl(calldir),
>> + svsk->sk_bc_xprt, xid);
>> + vec[0] = rqstp->rq_arg.head[0];
>> + goto out;
>> + }
>> +
>> + memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
>> + sizeof(struct xdr_buf));
>> + /* copy the xid and call direction */
>> + memcpy(req->rq_private_buf.head[0].iov_base,
>> + rqstp->rq_arg.head[0].iov_base, 8);
>> + vec[0] = req->rq_private_buf.head[0];
>> + }
>> + out:
>> + vec[0].iov_base += 8;
>> + vec[0].iov_len -= 8;
>> + len = svsk->sk_reclen - 8;
>> + error:
>> + *reqpp = req;
>> + return len;
>> +}
>> +
>> /*
>> * Receive data from a TCP socket.
>> */
>> @@ -906,6 +958,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>> int len;
>> struct kvec *vec;
>> int pnum, vlen;
>> + struct rpc_rqst *req = NULL;
>>
>> dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
>> svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
>> @@ -919,9 +972,27 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>> vec = rqstp->rq_vec;
>> vec[0] = rqstp->rq_arg.head[0];
>> vlen = PAGE_SIZE;
>> +
>> + /*
>> + * We have enough data for the whole tcp record. Let's try and read the
>> + * first 8 bytes to get the xid and the call direction. We can use this
>> + * to figure out if this is a call or a reply to a callback. If
>> + * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
>> + * In that case, don't bother with the calldir and just read the data.
>> + * It will be rejected in svc_process.
>> + */
>> + if (len >= 8) {
>> + len = svc_process_calldir(svsk, rqstp, &req, vec);
>> + if (len < 0)
>> + goto err_again;
>> + vlen -= 8;
>> + }
>> +
>> pnum = 1;
>> while (vlen < len) {
>> - vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
>> + vec[pnum].iov_base = (req) ?
>> + page_address(req->rq_private_buf.pages[pnum - 1]) :
>> + page_address(rqstp->rq_pages[pnum]);
>> vec[pnum].iov_len = PAGE_SIZE;
>> pnum++;
>> vlen += PAGE_SIZE;
>> @@ -931,8 +1002,18 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>> /* Now receive data */
>> len = svc_recvfrom(rqstp, vec, pnum, len);
>> if (len < 0)
>> - goto error;
>> + goto err_again;
>
> This seems to belong to the previous patch,
> as does the last hunk in this file (@@ -957,21 +1039,19 @@).
Thanks for catching this. I'll move it to the previous patch and resubmit
both.
- ricardo
> Benny
>
>> +
>> + /*
>> + * Account for the 8 bytes we read earlier
>> + */
>> + len += 8;
>>
>> + if (req) {
>> + xprt_complete_rqst(req->rq_task, len);
>> + len = 0;
>> + goto out;
>> + }
>> dprintk("svc: TCP complete record (%d bytes)\n", len);
>> rqstp->rq_arg.len = len;
>> rqstp->rq_arg.page_base = 0;
>> @@ -946,6 +1027,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>> rqstp->rq_xprt_ctxt = NULL;
>> rqstp->rq_prot = IPPROTO_TCP;
>>
>> +out:
>> /* Reset TCP read info */
>> svsk->sk_reclen = 0;
>> svsk->sk_tcplen = 0;
>> @@ -957,21 +1039,19 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
>>
>> return len;
>>
>> - err_delete:
>> - set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
>> - return -EAGAIN;
>> -
>> - error:
>> +err_again:
>> if (len == -EAGAIN) {
>> dprintk("RPC: TCP recvfrom got EAGAIN\n");
>> svc_xprt_received(&svsk->sk_xprt);
>> - } else {
>> + return len;
>> + }
>> +error:
>> + if (len != -EAGAIN) {
>> printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
>> svsk->sk_xprt.xpt_server->sv_name, -len);
>> - goto err_delete;
>> + set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
>> }
>> -
>> - return len;
>> + return -EAGAIN;
>> }
>>
>> /*
>> diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
>> index a0bfe53..03f175e 100644
>> --- a/net/sunrpc/xprt.c
>> +++ b/net/sunrpc/xprt.c
>> @@ -1015,6 +1015,27 @@ void xprt_release(struct rpc_task *task)
>> spin_unlock(&xprt->reserve_lock);
>> }
>>
>> +/*
>> + * The autoclose function for the back channel
>> + *
>> + * The callback channel should never close the channel,
>> + * let the forechannel do that.
>> + */
>> +static void bc_autoclose(struct work_struct *work)
>> +{
>> + return;
>> +}
>> +
>> +
>> +/*
>> + * The autodisconnect routine for the back channel. We never disconnect
>> + */
>> +static void
>> +bc_init_autodisconnect(unsigned long data)
>> +{
>> + return;
>> +}
>> +
>> /**
>> * xprt_create_transport - create an RPC transport
>> * @args: rpc transport creation arguments
>> @@ -1051,9 +1072,16 @@ found:
>>
>> INIT_LIST_HEAD(&xprt->free);
>> INIT_LIST_HEAD(&xprt->recv);
>> - INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
>> - setup_timer(&xprt->timer, xprt_init_autodisconnect,
>> - (unsigned long)xprt);
>> + if (args->bc_sock) {
>> + INIT_WORK(&xprt->task_cleanup, bc_autoclose);
>> + setup_timer(&xprt->timer, bc_init_autodisconnect,
>> + (unsigned long)xprt);
>> + } else {
>> + INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
>> + setup_timer(&xprt->timer, xprt_init_autodisconnect,
>> + (unsigned long)xprt);
>> + }
>> +
>> xprt->last_used = jiffies;
>> xprt->cwnd = RPC_INITCWND;
>> xprt->bind_index = 0;
>> @@ -1073,6 +1101,13 @@ found:
>> dprintk("RPC: created transport %p with %u slots\n", xprt,
>> xprt->max_reqs);
>>
>> + /*
>> + * Since we don't want connections for the backchannel, we set
>> + * the xprt status to connected
>> + */
>> + if (args->bc_sock)
>> + xprt_set_connected(xprt);
>> +
>> return xprt;
>> }
>>
>> diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
>> index d40ff50..067d205 100644
>> --- a/net/sunrpc/xprtsock.c
>> +++ b/net/sunrpc/xprtsock.c
>> @@ -32,6 +32,7 @@
>> #include <linux/tcp.h>
>> #include <linux/sunrpc/clnt.h>
>> #include <linux/sunrpc/sched.h>
>> +#include <linux/sunrpc/svcsock.h>
>> #include <linux/sunrpc/xprtsock.h>
>> #include <linux/file.h>
>>
>> @@ -1966,6 +1967,219 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt,
>> struct seq_file *seq)
>> xprt->stat.bklog_u);
>> }
>>
>> +/*
>> + * The connect worker for the backchannel
>> + * This should never be called as we should never need to connect
>> + */
>> +static void bc_connect_worker(struct work_struct *work)
>> +{
>> + BUG();
>> +}
>> +
>> +/*
>> + * The set_port routine of the rpc_xprt_ops. This is related to the
>> portmapper
>> + * and should never be called
>> + */
>> +
>> +static void bc_set_port(struct rpc_xprt *xprt, unsigned short port)
>> +{
>> + BUG();
>> +}
>> +
>> +/*
>> + * The connect routine for the backchannel rpc_xprt ops
>> + * Again, should never be called!
>> + */
>> +
>> +static void bc_connect(struct rpc_task *task)
>> +{
>> + BUG();
>> +}
>> +
>> +struct rpc_buffer {
>> + size_t len;
>> + char data[];
>> +};
>> +/*
>> + * Allocate a bunch of pages for a scratch buffer for the rpc code. The
>> reason
>> + * we allocate pages instead of doing a kmalloc like rpc_malloc is because we
>> want
>> + * to use the server side send routines.
>> + */
>> +void *bc_malloc(struct rpc_task *task, size_t size)
>> +{
>> + struct page *page;
>> + struct rpc_buffer *buf;
>> +
>> + BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
>> + page = alloc_page(GFP_KERNEL);
>> +
>> + if (!page)
>> + return NULL;
>> +
>> + buf = page_address(page);
>> + buf->len = PAGE_SIZE;
>> +
>> + return buf->data;
>> +}
>> +
>> +/*
>> + * Free the space allocated in the bc_alloc routine
>> + */
>> +void bc_free(void *buffer)
>> +{
>> + struct rpc_buffer *buf;
>> +
>> + if (!buffer)
>> + return;
>> +
>> + buf = container_of(buffer, struct rpc_buffer, data);
>> + free_pages((unsigned long)buf, get_order(buf->len));
>> +}
>> +
>> +/*
>> + * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
>> + * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
>> + */
>> +static int bc_sendto(struct rpc_rqst *req)
>> +{
>> + int total_len;
>> + int len;
>> + int size;
>> + int result;
>> + struct xdr_buf *xbufp = &req->rq_snd_buf;
>> + struct page **pages = xbufp->pages;
>> + unsigned int flags = MSG_MORE;
>> + unsigned int pglen = xbufp->page_len;
>> + size_t base = xbufp->page_base;
>> + struct rpc_xprt *xprt = req->rq_xprt;
>> + struct sock_xprt *transport =
>> + container_of(xprt, struct sock_xprt, xprt);
>> + struct socket *sock = transport->sock;
>> +
>> + total_len = xbufp->len;
>> +
>> + /*
>> + * Set up the rpc header and record marker stuff
>> + */
>> + xs_encode_tcp_record_marker(xbufp);
>> +
>> + /*
>> + * The RPC message is divided into 3 pieces:
>> + * - The header: This is what most of the smaller RPC messages consist
>> + * of. Often the whole message is in this.
>> + *
>> + * - xdr->pages: This is a list of pages that contain data, for
>> + * example in a write request or while using rpcsec gss
>> + *
>> + * - The tail: This is the rest of the rpc message
>> + *
>> + * First we send the header, then the pages and then finally the tail.
>> + * The code borrows heavily from svc_sendto.
>> + */
>> +
>> + /*
>> + * Send the head
>> + */
>> + if (total_len == xbufp->head[0].iov_len)
>> + flags = 0;
>> +
>> + len = sock->ops->sendpage(sock, virt_to_page(xbufp->head[0].iov_base),
>> + (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK,
>> + xbufp->head[0].iov_len, flags);
>> +
>> + if (len != xbufp->head[0].iov_len)
>> + goto out;
>> +
>> + /*
>> + * send page data
>> + *
>> + * Check the amount of data to be sent. If it is less than the
>> + * remaining page, then send it else send the current page
>> + */
>> +
>> + size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
>> + while (pglen > 0) {
>> + if (total_len == size)
>> + flags = 0;
>> + result = sock->ops->sendpage(sock, *pages, base, size, flags);
>> + if (result > 0)
>> + len += result;
>> + if (result != size)
>> + goto out;
>> + total_len -= size;
>> + pglen -= size;
>> + size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
>> + base = 0;
>> + pages++;
>> + }
>> + /*
>> + * send tail
>> + */
>> + if (xbufp->tail[0].iov_len) {
>> + result = sock->ops->sendpage(sock,
>> + xbufp->tail[0].iov_base,
>> + (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK,
>> + xbufp->tail[0].iov_len,
>> + 0);
>> +
>> + if (result > 0)
>> + len += result;
>> + }
>> +out:
>> + if (len != xbufp->len)
>> + printk(KERN_NOTICE "Error sending entire callback!\n");
>> +
>> + return len;
>> +}
>> +
>> +/*
>> + * The send routine. Borrows from svc_send
>> + */
>> +static int bc_send_request(struct rpc_task *task)
>> +{
>> + struct rpc_rqst *req = task->tk_rqstp;
>> + struct rpc_xprt *bc_xprt = req->rq_xprt;
>> + struct svc_xprt *xprt;
>> + struct svc_sock *svsk;
>> + u32 len;
>> +
>> + dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
>> + /*
>> + * Get the server socket associated with this callback xprt
>> + */
>> + svsk = bc_xprt->bc_sock;
>> + xprt = &svsk->sk_xprt;
>> +
>> + mutex_lock(&xprt->xpt_mutex);
>> + if (test_bit(XPT_DEAD, &xprt->xpt_flags))
>> + len = -ENOTCONN;
>> + else
>> + len = bc_sendto(req);
>> + mutex_unlock(&xprt->xpt_mutex);
>> +
>> + return 0;
>> +
>> +}
>> +
>> +/*
>> + * The close routine. Since this is client initiated, we do nothing
>> + */
>> +
>> +static void bc_close(struct rpc_xprt *xprt)
>> +{
>> + return;
>> +}
>> +
>> +/*
>> + * The xprt destroy routine. Again, because this connection is client
>> + * initiated, we do nothing
>> + */
>> +
>> +static void bc_destroy(struct rpc_xprt *xprt)
>> +{
>> + return;
>> +}
>> +
>> static struct rpc_xprt_ops xs_udp_ops = {
>> .set_buffer_size = xs_udp_set_buffer_size,
>> .reserve_xprt = xprt_reserve_xprt_cong,
>> @@ -1999,6 +2213,24 @@ static struct rpc_xprt_ops xs_tcp_ops = {
>> .print_stats = xs_tcp_print_stats,
>> };
>>
>> +/*
>> + * The rpc_xprt_ops for the server backchannel
>> + */
>> +
>> +static struct rpc_xprt_ops bc_tcp_ops = {
>> + .reserve_xprt = xprt_reserve_xprt,
>> + .release_xprt = xprt_release_xprt,
>> + .set_port = bc_set_port,
>> + .connect = bc_connect,
>> + .buf_alloc = bc_malloc,
>> + .buf_free = bc_free,
>> + .send_request = bc_send_request,
>> + .set_retrans_timeout = xprt_set_retrans_timeout_def,
>> + .close = bc_close,
>> + .destroy = bc_destroy,
>> + .print_stats = xs_tcp_print_stats,
>> +};
>> +
>> static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
>> unsigned int slot_table_size)
>> {
>> @@ -2131,13 +2363,29 @@ static struct rpc_xprt *xs_setup_tcp(struct
>> xprt_create *args)
>> xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
>> xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
>>
>> - xprt->bind_timeout = XS_BIND_TO;
>> - xprt->connect_timeout = XS_TCP_CONN_TO;
>> - xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
>> - xprt->idle_timeout = XS_IDLE_DISC_TO;
>> + if (args->bc_sock) {
>> + /* backchannel */
>> + xprt_set_bound(xprt);
>> + INIT_DELAYED_WORK(&transport->connect_worker,
>> + bc_connect_worker);
>> + xprt->bind_timeout = 0;
>> + xprt->connect_timeout = 0;
>> + xprt->reestablish_timeout = 0;
>> + xprt->idle_timeout = (~0);
>>
>> - xprt->ops = &xs_tcp_ops;
>> - xprt->timeout = &xs_tcp_default_timeout;
>> + /*
>> + * The backchannel uses the same socket connection as the
>> + * forechannel
>> + */
>> + xprt->bc_sock = args->bc_sock;
>> + xprt->bc_sock->sk_bc_xprt = xprt;
>> + transport->sock = xprt->bc_sock->sk_sock;
>> + transport->inet = xprt->bc_sock->sk_sk;
>> +
>> + xprt->ops = &bc_tcp_ops;
>> +
>> + goto next;
>> + }
>>
>> switch (addr->sa_family) {
>> case AF_INET:
>> @@ -2145,13 +2393,29 @@ static struct rpc_xprt *xs_setup_tcp(struct
>> xprt_create *args)
>> xprt_set_bound(xprt);
>>
>> INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker4);
>> - xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
>> break;
>> case AF_INET6:
>> if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
>> xprt_set_bound(xprt);
>>
>> INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker6);
>> + break;
>> + }
>> + xprt->bind_timeout = XS_BIND_TO;
>> + xprt->connect_timeout = XS_TCP_CONN_TO;
>> + xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
>> + xprt->idle_timeout = XS_IDLE_DISC_TO;
>> +
>> + xprt->ops = &xs_tcp_ops;
>> +
>> +next:
>> + xprt->timeout = &xs_tcp_default_timeout;
>> +
>> + switch (addr->sa_family) {
>> + case AF_INET:
>> + xs_format_ipv4_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
>> + break;
>> + case AF_INET6:
>> xs_format_ipv6_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
>> break;
>> default:
On May. 20, 2009, 21:17 +0300, "Labiaga, Ricardo" <[email protected]> wrote:
> On 5/20/09 12:46 AM, "Benny Halevy" <[email protected]> wrote:
>
>> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
>> wrote:
>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>> [nfsd41: cb_recall callback]
>>> [Share v4.0 and v4.1 back channel xdr]
>>> Signed-off-by: Andy Adamson <[email protected]>
>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>> Signed-off-by: Benny Halevy <[email protected]>
>>> [Share v4.0 and v4.1 back channel xdr]
>>> Signed-off-by: Andy Adamson <[email protected]>
>>> Signed-off-by: Benny Halevy <[email protected]>
>>> [nfsd41: use nfsd4_cb_sequence for callback minorversion]
>>> [nfsd41: conditionally decode_sequence in nfs4_xdr_dec_cb_recall]
>>> Signed-off-by: Benny Halevy <[email protected]>
>>> [nfsd41: Backchannel: Add sequence arguments to callback RPC arguments]
>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>> Signed-off-by: Benny Halevy <[email protected]>
>>> ---
>>> fs/nfsd/nfs4callback.c | 40 ++++++++++++++++++++++++++++++++++++----
>>> 1 files changed, 36 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>>> index 521d5f5..b25dcc2 100644
>>> --- a/fs/nfsd/nfs4callback.c
>>> +++ b/fs/nfsd/nfs4callback.c
>>> @@ -292,15 +292,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
>>> }
>>>
>>> static int
>>> -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct
>>> nfs4_delegation *args)
>>> +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
>>> + struct nfs4_rpc_args *rpc_args)
>>> {
>>> struct xdr_stream xdr;
>>> + struct nfs4_delegation *args = rpc_args->args_op;
>>> struct nfs4_cb_compound_hdr hdr = {
>>> .ident = args->dl_ident,
>>> + .minorversion = rpc_args->args_seq.cbs_minorversion,
>>> };
>>>
>>> xdr_init_encode(&xdr, &req->rq_snd_buf, p);
>>> encode_cb_compound_hdr(&xdr, &hdr);
>>> + encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
>>> encode_cb_recall(&xdr, args, &hdr);
>>> encode_cb_nops(&hdr);
>>> return 0;
>>> @@ -400,7 +404,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
>>> }
>>>
>>> static int
>>> -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
>>> +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
>>> + struct nfs4_rpc_res *rpc_res)
>>> {
>>> struct xdr_stream xdr;
>>> struct nfs4_cb_compound_hdr hdr;
>>> @@ -410,6 +415,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32
>>> *p)
>>> status = decode_cb_compound_hdr(&xdr, &hdr);
>>> if (status)
>>> goto out;
>>> + if (rpc_res && rpc_res->res_seq) {
>> With this version rpc_res != NULL is guaranteed, isn't it?
>> Also, embedding res_seq in nfs4_rpc_res will obviate this condition further.
>
> True, rpc_res will always be non-NULL but rpc_res->res_seq is still NULL if
> this is a v4.0 callback.
>
>>> + status = decode_cb_sequence(&xdr, rpc_res->res_seq, rqstp);
>>> + if (status)
>>> + goto out;
>>> + }
>>> status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
>>> out:
>>> return status;
>>> @@ -687,6 +697,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task,
>>> void *calldata)
>>> struct nfs4_delegation *dp = calldata;
>>> struct nfs4_client *clp = dp->dl_client;
>>>
>>> + nfsd4_cb_done(task, calldata);
>>> +
>>> switch (task->tk_status) {
>>> case -EIO:
>>> /* Network partition? */
>>> @@ -699,16 +711,20 @@ static void nfsd4_cb_recall_done(struct rpc_task *task,
>>> void *calldata)
>>> break;
>>> default:
>>> /* success, or error we can't handle */
>>> - return;
>>> + goto done;
>>> }
>>> if (dp->dl_retries--) {
>>> rpc_delay(task, 2*HZ);
>>> task->tk_status = 0;
>>> rpc_restart_call(task);
>>> + return;
>>> } else {
>>> atomic_set(&clp->cl_cb_conn.cb_set, 0);
>>> warn_no_callback_path(clp, task->tk_status);
>>> }
>>> +done:
>>> + kfree(task->tk_msg.rpc_argp);
>>> + kfree(task->tk_msg.rpc_resp);
>>> }
>>>
>>> static void nfsd4_cb_recall_release(void *calldata)
>>> @@ -734,16 +750,32 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
>>> {
>>> struct nfs4_client *clp = dp->dl_client;
>>> struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
>>> + struct nfs4_rpc_args *args;
>>> + struct nfs4_rpc_res *res;
>>> struct rpc_message msg = {
>>> .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
>>> - .rpc_argp = dp,
>>> .rpc_cred = clp->cl_cb_conn.cb_cred
>>> };
>>> int status;
>>>
>>> + args = kzalloc(sizeof(*args), GFP_KERNEL);
>>> + if (!args) {
>>> + status = -ENOMEM;
>>> + goto out;
>>> + }
>>> + res = kzalloc(sizeof(*res), GFP_KERNEL);
>>> + if (!res) {
>>> + kfree(args);
>>> + status = -ENOMEM;
>>> + goto out;
>>> + }
>> Hmm, why not allocate the two in one piece and possibly having a kmem_cache
>> for them?
>
> They're two different types of structures. You mean encapsulate them in a
> super structure and then have pointers to the respective members? I'm not
> following.
Exactly.
I meant something like this:
struct nfs4_rpc_alloc {
	struct nfs4_rpc_args	args;
	struct nfs4_rpc_res	res;
};
However, as you pointed out elsewhere, struct nfs4_rpc_res currently
contains only a pointer to the struct nfsd4_cb_sequence that is embedded
in the nfs4_rpc_args, so we can just get rid of struct nfs4_rpc_res
altogether for now, until we have a better use for it, and set
	task->tk_msg.rpc_resp = &args->args_seq;
directly in nfsd41_cb_setup_sequence.
(Or even up in nfsd4_cb_recall and friends, so it's always set
for all minorversions, since decode_cb_sequence is a no-op for
res->cbs_minorversion == 0.)
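Just to illustrate, a rough and untested sketch of the setup side, assuming
we do it right in nfsd4_cb_recall (all the names below are the ones from your
patch; this is only meant to show the single allocation, not an actual patch):

	struct nfs4_rpc_args *args;

	/* sketch only: one allocation, no separate nfs4_rpc_res */
	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		status = -ENOMEM;
		goto out;
	}
	args->args_op = dp;
	msg.rpc_argp = args;
	/* the reply sequence lives inside the args allocation */
	msg.rpc_resp = &args->args_seq;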
Benny
>
> - ricardo
>
>> Benny
>>
>>> + args->args_op = dp;
>>> + msg.rpc_argp = args;
>>> + msg.rpc_resp = res;
>>> dp->dl_retries = 1;
>>> status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
>>> &nfsd4_cb_recall_ops, dp);
>>> +out:
>>> if (status) {
>>> put_nfs4_client(clp);
>>> nfs4_put_delegation(dp);
>
On 5/20/09 10:59 PM, "Benny Halevy" <[email protected]> wrote:
> On May. 20, 2009, 21:17 +0300, "Labiaga, Ricardo" <[email protected]>
> wrote:
>> On 5/20/09 12:46 AM, "Benny Halevy" <[email protected]> wrote:
>>
>>> On May. 20, 2009, 6:00 +0300, Ricardo Labiaga <[email protected]>
>>> wrote:
>>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>>> [nfsd41: cb_recall callback]
>>>> [Share v4.0 and v4.1 back channel xdr]
>>>> Signed-off-by: Andy Adamson <[email protected]>
>>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>>> Signed-off-by: Benny Halevy <[email protected]>
>>>> [Share v4.0 and v4.1 back channel xdr]
>>>> Signed-off-by: Andy Adamson <[email protected]>
>>>> Signed-off-by: Benny Halevy <[email protected]>
>>>> [nfsd41: use nfsd4_cb_sequence for callback minorversion]
>>>> [nfsd41: conditionally decode_sequence in nfs4_xdr_dec_cb_recall]
>>>> Signed-off-by: Benny Halevy <[email protected]>
>>>> [nfsd41: Backchannel: Add sequence arguments to callback RPC arguments]
>>>> Signed-off-by: Ricardo Labiaga <[email protected]>
>>>> Signed-off-by: Benny Halevy <[email protected]>
>>>> ---
>>>> fs/nfsd/nfs4callback.c | 40 ++++++++++++++++++++++++++++++++++++----
>>>> 1 files changed, 36 insertions(+), 4 deletions(-)
>>>>
>>>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>>>> index 521d5f5..b25dcc2 100644
>>>> --- a/fs/nfsd/nfs4callback.c
>>>> +++ b/fs/nfsd/nfs4callback.c
>>>> @@ -292,15 +292,19 @@ nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
>>>> }
>>>>
>>>> static int
>>>> -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, struct nfs4_delegation *args)
>>>> +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
>>>> + struct nfs4_rpc_args *rpc_args)
>>>> {
>>>> struct xdr_stream xdr;
>>>> + struct nfs4_delegation *args = rpc_args->args_op;
>>>> struct nfs4_cb_compound_hdr hdr = {
>>>> .ident = args->dl_ident,
>>>> + .minorversion = rpc_args->args_seq.cbs_minorversion,
>>>> };
>>>>
>>>> xdr_init_encode(&xdr, &req->rq_snd_buf, p);
>>>> encode_cb_compound_hdr(&xdr, &hdr);
>>>> + encode_cb_sequence(&xdr, &rpc_args->args_seq, &hdr);
>>>> encode_cb_recall(&xdr, args, &hdr);
>>>> encode_cb_nops(&hdr);
>>>> return 0;
>>>> @@ -400,7 +404,8 @@ nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
>>>> }
>>>>
>>>> static int
>>>> -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
>>>> +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
>>>> + struct nfs4_rpc_res *rpc_res)
>>>> {
>>>> struct xdr_stream xdr;
>>>> struct nfs4_cb_compound_hdr hdr;
>>>> @@ -410,6 +415,11 @@ nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p)
>>>> status = decode_cb_compound_hdr(&xdr, &hdr);
>>>> if (status)
>>>> goto out;
>>>> + if (rpc_res && rpc_res->res_seq) {
>>> With this version rpc_res != NULL is guaranteed, isn't it?
>>> Also, embedding res_seq in nfs4_rpc_res, rather than keeping it a pointer, would obviate this check altogether.
>>
>> True, rpc_res will always be non-NULL, but rpc_res->res_seq is still NULL if
>> this is a v4.0 callback.
>>
>>>> + status = decode_cb_sequence(&xdr, rpc_res->res_seq, rqstp);
>>>> + if (status)
>>>> + goto out;
>>>> + }
>>>> status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
>>>> out:
>>>> return status;
>>>> @@ -687,6 +697,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
>>>> struct nfs4_delegation *dp = calldata;
>>>> struct nfs4_client *clp = dp->dl_client;
>>>>
>>>> + nfsd4_cb_done(task, calldata);
>>>> +
>>>> switch (task->tk_status) {
>>>> case -EIO:
>>>> /* Network partition? */
>>>> @@ -699,16 +711,20 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata)
>>>> break;
>>>> default:
>>>> /* success, or error we can't handle */
>>>> - return;
>>>> + goto done;
>>>> }
>>>> if (dp->dl_retries--) {
>>>> rpc_delay(task, 2*HZ);
>>>> task->tk_status = 0;
>>>> rpc_restart_call(task);
>>>> + return;
>>>> } else {
>>>> atomic_set(&clp->cl_cb_conn.cb_set, 0);
>>>> warn_no_callback_path(clp, task->tk_status);
>>>> }
>>>> +done:
>>>> + kfree(task->tk_msg.rpc_argp);
>>>> + kfree(task->tk_msg.rpc_resp);
>>>> }
>>>>
>>>> static void nfsd4_cb_recall_release(void *calldata)
>>>> @@ -734,16 +750,32 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
>>>> {
>>>> struct nfs4_client *clp = dp->dl_client;
>>>> struct rpc_clnt *clnt = clp->cl_cb_conn.cb_client;
>>>> + struct nfs4_rpc_args *args;
>>>> + struct nfs4_rpc_res *res;
>>>> struct rpc_message msg = {
>>>> .rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL],
>>>> - .rpc_argp = dp,
>>>> .rpc_cred = clp->cl_cb_conn.cb_cred
>>>> };
>>>> int status;
>>>>
>>>> + args = kzalloc(sizeof(*args), GFP_KERNEL);
>>>> + if (!args) {
>>>> + status = -ENOMEM;
>>>> + goto out;
>>>> + }
>>>> + res = kzalloc(sizeof(*res), GFP_KERNEL);
>>>> + if (!res) {
>>>> + kfree(args);
>>>> + status = -ENOMEM;
>>>> + goto out;
>>>> + }
>>> Hmm, why not allocate the two in one piece and possibly having a kmem_cache
>>> for them?
>>
>> They're two different types of structures. You mean encapsulate them in a
>> super structure and then have pointers to the respective members? I'm not
>> following.
>
> Exactly.
>
> I meant something like this:
>
> struct nfs4_rpc_alloc {
> 	struct nfs4_rpc_args	args;
> 	struct nfs4_rpc_res	res;
> };
>
> However, as you pointed out elsewhere, struct nfs4_rpc_res currently
> contains only a pointer to the struct nfsd4_cb_sequence that is embedded
> in the nfs4_rpc_args, so we can just get rid of struct nfs4_rpc_res
> altogether for now, until we have a better use for it, and set
> 	task->tk_msg.rpc_resp = &args->args_seq;
> directly in nfsd41_cb_setup_sequence.
> (Or even up in nfsd4_cb_recall and friends, so it's always set
> for all minorversions, since decode_cb_sequence is a no-op for
> res->cbs_minorversion == 0.)
>
I initially decided against doing that since CB_GETATTR has results that
need to be returned to the caller. A pointer to the results would be
included in 'struct nfs4_rpc_res' for access during the decode.
Although, in the spirit of not adding code for future use, I guess it's best to
use &args->args_seq at this time and then change it to use 'nfs4_rpc_res'
when we actually implement CB_GETATTR.
You just talked me into it; I'll make the change.
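Something like this on the decode side, then (rough sketch, untested; it
assumes decode_cb_sequence really does stay a no-op when cbs_minorversion
is 0, as you note above, so the NULL check can go away and rpc_resp can be
the embedded nfsd4_cb_sequence itself):

static int
nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
		struct nfsd4_cb_sequence *seq)
{
	struct xdr_stream xdr;
	struct nfs4_cb_compound_hdr hdr;
	int status;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	status = decode_cb_compound_hdr(&xdr, &hdr);
	if (status)
		goto out;
	/* sketch: assumed to be a no-op for a v4.0 callback
	 * (seq->cbs_minorversion == 0) */
	status = decode_cb_sequence(&xdr, seq, rqstp);
	if (status)
		goto out;
	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
out:
	return status;
}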
- ricardo
> Benny
>
>>
>> - ricardo
>>
>>> Benny
>>>
>>>> + args->args_op = dp;
>>>> + msg.rpc_argp = args;
>>>> + msg.rpc_resp = res;
>>>> dp->dl_retries = 1;
>>>> status = rpc_call_async(clnt, &msg, RPC_TASK_SOFT,
>>>> &nfsd4_cb_recall_ops, dp);
>>>> +out:
>>>> if (status) {
>>>> put_nfs4_client(clp);
>>>> nfs4_put_delegation(dp);
>>