Subject: [PATCH v3 4/6] svcrdma: Add infrastructure to send backwards direction RPC/RDMA calls
From: Chuck Lever
To: linux-rdma@vger.kernel.org, linux-nfs@vger.kernel.org
Date: Mon, 07 Dec 2015 15:42:56 -0500
Message-ID: <20151207204256.12988.76473.stgit@klimt.1015granger.net>
In-Reply-To: <20151207203851.12988.97804.stgit@klimt.1015granger.net>
References: <20151207203851.12988.97804.stgit@klimt.1015granger.net>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Sender: linux-nfs-owner@vger.kernel.org

To support the NFSv4.1 backchannel on RDMA connections, add a
mechanism for sending a backwards-direction RPC/RDMA call on a
connection established by a client.

Signed-off-by: Chuck Lever
---
 include/linux/sunrpc/svc_rdma.h       |    2 +
 net/sunrpc/xprtrdma/svc_rdma_sendto.c |   61 +++++++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f71c625..bf9b17b 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -215,6 +215,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
 	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
+extern int svc_rdma_bc_post_send(struct svcxprt_rdma *,
+				 struct svc_rdma_op_ctxt *, struct xdr_buf *);
 
 /* svc_rdma_transport.c */
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);

diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index bad5eaa..846df63 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -648,3 +648,64 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	svc_rdma_put_context(ctxt, 0);
 	return ret;
 }
+
+/* Send a backwards direction RPC call.
+ *
+ * Caller holds the connection's mutex and has already marshaled the
+ * RPC/RDMA request. Before sending the request, this API also posts
+ * an extra receive buffer to catch the bc reply for this request.
+ */
+int svc_rdma_bc_post_send(struct svcxprt_rdma *rdma,
+			  struct svc_rdma_op_ctxt *ctxt, struct xdr_buf *sndbuf)
+{
+	struct svc_rdma_req_map *vec;
+	struct ib_send_wr send_wr;
+	int ret;
+
+	vec = svc_rdma_get_req_map();
+	ret = map_xdr(rdma, sndbuf, vec);
+	if (ret)
+		goto out;
+
+	/* Post a recv buffer to handle reply for this request */
+	ret = svc_rdma_post_recv(rdma);
+	if (ret) {
+		pr_err("svcrdma: Failed to post bc receive buffer, err=%d. "
" + "Closing transport %p.\n", ret, rdma); + set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); + ret = -ENOTCONN; + goto out; + } + + ctxt->wr_op = IB_WR_SEND; + ctxt->direction = DMA_TO_DEVICE; + ctxt->sge[0].lkey = rdma->sc_dma_lkey; + ctxt->sge[0].length = sndbuf->len; + ctxt->sge[0].addr = + ib_dma_map_page(rdma->sc_cm_id->device, ctxt->pages[0], 0, + sndbuf->len, DMA_TO_DEVICE); + if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr)) { + svc_rdma_unmap_dma(ctxt); + ret = -EIO; + goto out; + } + atomic_inc(&rdma->sc_dma_used); + + memset(&send_wr, 0, sizeof send_wr); + send_wr.wr_id = (unsigned long)ctxt; + send_wr.sg_list = ctxt->sge; + send_wr.num_sge = 1; + send_wr.opcode = IB_WR_SEND; + send_wr.send_flags = IB_SEND_SIGNALED; + + ret = svc_rdma_send(rdma, &send_wr); + if (ret) { + svc_rdma_unmap_dma(ctxt); + ret = -EIO; + goto out; + } +out: + svc_rdma_put_req_map(vec); + dprintk("svcrdma: %s returns %d\n", __func__, ret); + return ret; +}