net/sunrpc/xprtrdma/backchannel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA.
 */

#include <linux/module.h>
#include <linux/sunrpc/xprt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#undef RPCRDMA_BACKCHANNEL_DEBUG

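/* Undo rpcrdma_bc_setup_rqst(): unlink the rpcrdma_req from the
 * transport's list of reqs, release the req and its buffers, then
 * free the rpc_rqst itself.
 */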
static void rpcrdma_bc_free_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	spin_lock(&buf->rb_reqslock);
	list_del(&req->rl_all);
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_destroy_req(req);

	kfree(rqst);
}

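/* Set up the state behind one backchannel rpc_rqst: an rpcrdma_req,
 * a registered buffer for the RPC/RDMA transport header, and a send
 * buffer large enough for an inline backchannel reply.
 */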
static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
				 struct rpc_rqst *rqst)
{
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t size;

	req = rpcrdma_create_req(r_xprt);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->rl_backchannel = true;

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));

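	/* The reply is sent from rl_sendbuf, sized to the transport's
	 * inline threshold; the XDR buffer head is capped at one page.
	 */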
	size = r_xprt->rx_data.inline_rsize;
	rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_sendbuf = rb;
	xdr_buf_init(&rqst->rq_snd_buf, rb->rg_base,
		     min_t(size_t, size, PAGE_SIZE));
	rpcrdma_set_xprtdata(rqst, req);
	return 0;

out_fail:
	rpcrdma_bc_free_rqst(r_xprt, rqst);
	return -ENOMEM;
}

/* Allocate and add receive buffers to the rpcrdma_buffer's
 * existing list of reps. These are released when the
 * transport is destroyed.
 */
static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
				 unsigned int count)
{
	struct rpcrdma_rep *rep;
	int rc = 0;

	while (count--) {
		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			pr_err("RPC: %s: reply buffer alloc failed\n",
			       __func__);
			rc = PTR_ERR(rep);
			break;
		}

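		/* Release the new rep into the transport's buffer pool;
		 * xprt_rdma_bc_setup() posts these as extra receives once
		 * all of them have been created.
		 */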
		rpcrdma_recv_buffer_put(rep);
	}

	return rc;
}

/**
 * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of concurrent incoming requests to expect
 *
 * Returns 0 on success; otherwise a negative errno
 */
int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpc_rqst *rqst;
	unsigned int i;
	int rc;

	/* The backchannel reply path returns each rpc_rqst to the
	 * bc_pa_list _after_ the reply is sent. If the server is
	 * faster than the client, it can send another backward
	 * direction request before the rpc_rqst is returned to the
	 * list. The client rejects the request in this case.
	 *
	 * Twice as many rpc_rqsts are prepared to ensure there is
	 * always an rpc_rqst available as soon as a reply is sent.
	 */
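	/* 2 * reqs rpc_rqsts are created below, so reqs must not exceed
	 * half the backward direction Work Request budget.
	 */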
	if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
		goto out_err;

	for (i = 0; i < (reqs << 1); i++) {
		rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
		if (!rqst)
			goto out_free;

		dprintk("RPC: %s: new rqst %p\n", __func__, rqst);

		rqst->rq_xprt = &r_xprt->rx_xprt;
		INIT_LIST_HEAD(&rqst->rq_list);
		INIT_LIST_HEAD(&rqst->rq_bc_list);

		if (rpcrdma_bc_setup_rqst(r_xprt, rqst))
			goto out_free;

		spin_lock_bh(&xprt->bc_pa_lock);
		list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);
	}

	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
	if (rc)
		goto out_free;

	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
	if (rc)
		goto out_free;

	buffer->rb_bc_srv_max_requests = reqs;
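	/* Load the server-side RDMA transport module now; it supplies
	 * the "rdma-bc" transport class used by xprt_rdma_bc_up().
	 */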
	request_module("svcrdma");

	return 0;

out_free:
	xprt_rdma_bc_destroy(xprt, reqs);

out_err:
	pr_err("RPC: %s: setup backchannel transport failed\n", __func__);
	return -ENOMEM;
}

/**
 * xprt_rdma_bc_up - Create transport endpoint for backchannel service
 * @serv: server endpoint
 * @net: network namespace
 *
 * The "xprt" is an implied argument: it supplies the name of the
 * backchannel transport class.
 *
 * Returns zero on success, negative errno on failure
 */
int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
{
	int ret;

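	/* Port and flags are zero: the backchannel presumably rides the
	 * existing forward channel connection, so the "rdma-bc" class
	 * does not open a separate listener.
	 */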
	ret = svc_create_xprt(serv, "rdma-bc", net, PF_INET, 0, 0);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
 * @xprt: transport
 *
 * Returns maximum size, in bytes, of a backchannel message
 */
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	size_t maxmsg;

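	/* A backchannel message must fit inline in both directions and
	 * within a single page; subtracting the smallest RPC/RDMA header
	 * gives the maximum payload.
	 */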
	maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
	maxmsg = min_t(unsigned int, maxmsg, PAGE_SIZE);
	return maxmsg - RPCRDMA_HDRLEN_MIN;
}

/**
 * rpcrdma_bc_marshal_reply - Send backwards direction reply
 * @rqst: buffer containing RPC reply data
 *
 * Returns zero on success.
 */
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	__be32 *p;

	rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
	xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
			req->rl_rdmabuf->rg_base);

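	/* Fixed portion of the RPC/RDMA header: XID, version, credits,
	 * and rdma_msg, followed by empty read, write, and reply chunk
	 * lists: seven XDR words, or RPCRDMA_HDRLEN_MIN (28) octets.
	 */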
	p = xdr_reserve_space(&req->rl_stream, 28);
	if (unlikely(!p))
		return -EIO;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
				       &rqst->rq_snd_buf, rpcrdma_noch))
		return -EIO;
	return 0;
}

/**
 * xprt_rdma_bc_destroy - Release resources for handling backchannel requests
 * @xprt: transport associated with these backchannel resources
 * @reqs: number of incoming requests to destroy; ignored
 */
void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpc_rqst *rqst, *tmp;

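	/* bc_pa_lock is dropped and re-taken around each free: the entry
	 * is already off the list, and rpcrdma_bc_free_rqst() acquires
	 * other locks of its own.
	 */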
	spin_lock_bh(&xprt->bc_pa_lock);
	list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
		list_del(&rqst->rq_bc_pa_list);
		spin_unlock_bh(&xprt->bc_pa_lock);

		rpcrdma_bc_free_rqst(r_xprt, rqst);

		spin_lock_bh(&xprt->bc_pa_lock);
	}
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * xprt_rdma_bc_free_rqst - Release a backchannel rqst
 * @rqst: request to release
 */
void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;

	dprintk("RPC: %s: freeing rqst %p (req %p)\n",
		__func__, rqst, rpcr_to_rdmar(rqst));

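	/* clear_bit() does not act as a memory barrier, so explicit
	 * barriers order the flag update with respect to the surrounding
	 * accesses before the rqst goes back on the free list.
	 */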
	smp_mb__before_atomic();
	WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state));
	clear_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
	smp_mb__after_atomic();

	spin_lock_bh(&xprt->bc_pa_lock);
	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
	spin_unlock_bh(&xprt->bc_pa_lock);
}

/**
 * rpcrdma_bc_receive_call - Handle a backward direction call
 * @r_xprt: transport receiving the call
 * @rep: receive buffer containing the call
 *
 * Operational assumptions:
 *    o Backchannel credits are ignored, just as the NFS server
 *      forechannel currently does
 *    o The ULP manages a replay cache (e.g., NFSv4.1 sessions).
 *      No replay detection is done at the transport level
 */
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
			     struct rpcrdma_rep *rep)
{
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	struct svc_serv *bc_serv;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct xdr_buf *buf;
	size_t size;
	__be32 *p;

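	/* Decoding zero bytes yields the current position in the receive
	 * stream without advancing it: p points to the start of the
	 * backchannel call, and size is the number of bytes remaining.
	 */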
	p = xdr_inline_decode(&rep->rr_stream, 0);
	size = xdr_stream_remaining(&rep->rr_stream);

#ifdef RPCRDMA_BACKCHANNEL_DEBUG
	pr_info("RPC: %s: callback XID %08x, length=%zu\n",
		__func__, be32_to_cpup(p), size);
	pr_info("RPC: %s: %*ph\n", __func__, (int)size, p);
#endif

	/* Grab a free bc rqst */
	spin_lock(&xprt->bc_pa_lock);
	if (list_empty(&xprt->bc_pa_list)) {
		spin_unlock(&xprt->bc_pa_lock);
		goto out_overflow;
	}
	rqst = list_first_entry(&xprt->bc_pa_list,
				struct rpc_rqst, rq_bc_pa_list);
	list_del(&rqst->rq_bc_pa_list);
	spin_unlock(&xprt->bc_pa_lock);
	dprintk("RPC: %s: using rqst %p\n", __func__, rqst);

	/* Prepare rqst */
	rqst->rq_reply_bytes_recvd = 0;
	rqst->rq_bytes_sent = 0;
	rqst->rq_xid = *p;

	rqst->rq_private_buf.len = size;
	set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);

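	/* The call is decoded in place: rq_rcv_buf's head points straight
	 * into the posted receive buffer, avoiding a copy.
	 */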
	buf = &rqst->rq_rcv_buf;
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = p;
	buf->head[0].iov_len = size;
	buf->len = size;

	/* The receive buffer has to be hooked to the rpcrdma_req
	 * so that it is not released while the req is pointing
	 * to its buffer, and so that it can be reposted after
	 * the Upper Layer is done decoding it.
	 */
	req = rpcr_to_rdmar(rqst);
	dprintk("RPC: %s: attaching rep %p to req %p\n",
		__func__, rep, req);
	req->rl_reply = rep;

	/* Defeat the retransmit detection logic in send_request */
	req->rl_connect_cookie = 0;

	/* Queue rqst for ULP's callback service */
	bc_serv = xprt->bc_serv;
	spin_lock(&bc_serv->sv_cb_lock);
	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
	spin_unlock(&bc_serv->sv_cb_lock);

	wake_up(&bc_serv->sv_cb_waitq);

	r_xprt->rx_stats.bcall_count++;
	return;

out_overflow:
	pr_warn("RPC/RDMA backchannel overflow\n");
	xprt_disconnect_done(xprt);
	/* This receive buffer gets reposted automatically
	 * when the connection is re-established.
	 */
	return;
}