svcrdma: Return error from rdma_read_xdr so caller knows to free context
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
			       struct svc_rdma_op_ctxt *ctxt,
			       u32 byte_count)
{
	struct page *page;
	u32 bc;
	int sge_no;

	/* Swap the page in the SGE with the page in argpages */
	page = ctxt->pages[0];
	put_page(rqstp->rq_pages[0]);
	rqstp->rq_pages[0] = page;

	/* Set up the XDR head */
	rqstp->rq_arg.head[0].iov_base = page_address(page);
	rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
	rqstp->rq_arg.len = byte_count;
	rqstp->rq_arg.buflen = byte_count;

	/* Compute bytes past head in the SGL */
	bc = byte_count - rqstp->rq_arg.head[0].iov_len;

	/* If data remains, store it in the pagelist */
	rqstp->rq_arg.page_len = bc;
	rqstp->rq_arg.page_base = 0;
	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
	sge_no = 1;
	while (bc && sge_no < ctxt->count) {
		page = ctxt->pages[sge_no];
		put_page(rqstp->rq_pages[sge_no]);
		rqstp->rq_pages[sge_no] = page;
		bc -= min(bc, ctxt->sge[sge_no].length);
		rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
		sge_no++;
	}
	rqstp->rq_respages = &rqstp->rq_pages[sge_no];

	/* We should never run out of SGE because the limit is defined to
	 * support the max allowed RPC data length
	 */
	BUG_ON(bc && (sge_no == ctxt->count));
	BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
	       != byte_count);
	BUG_ON(rqstp->rq_arg.len != byte_count);

	/* If not all pages were used from the SGL, free the remaining ones */
	bc = sge_no;
	while (sge_no < ctxt->count) {
		page = ctxt->pages[sge_no++];
		put_page(page);
	}
	ctxt->count = bc;

	/* Set up tail */
	rqstp->rq_arg.tail[0].iov_base = NULL;
	rqstp->rq_arg.tail[0].iov_len = 0;
}

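/*
 * Map one chunk from the request's read-chunk list to the run of entries
 * it occupies in the temporary sge[] array built by rdma_rcl_to_sge().
 */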
struct chunk_sge {
	int start;		/* sge no for this chunk */
	int count;		/* sge count for this chunk */
};

/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
			   struct svc_rqst *rqstp,
			   struct svc_rdma_op_ctxt *head,
			   struct rpcrdma_msg *rmsgp,
			   struct ib_sge *sge,
			   struct chunk_sge *ch_sge_ary,
			   int ch_count,
			   int byte_count)
{
	int sge_no;
	int sge_bytes;
	int page_off;
	int page_no;
	int ch_bytes;
	int ch_no;
	struct rpcrdma_read_chunk *ch;

	sge_no = 0;
	page_no = 0;
	page_off = 0;
	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	ch_no = 0;
	ch_bytes = ch->rc_target.rs_length;
	head->arg.head[0] = rqstp->rq_arg.head[0];
	head->arg.tail[0] = rqstp->rq_arg.tail[0];
	head->arg.pages = &head->pages[head->count];
	head->sge[0].length = head->count; /* save count of hdr pages */
	head->arg.page_base = 0;
	head->arg.page_len = ch_bytes;
	head->arg.len = rqstp->rq_arg.len + ch_bytes;
	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
	head->count++;
	ch_sge_ary[0].start = 0;
	while (byte_count) {
		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
		sge[sge_no].addr =
			ib_dma_map_page(xprt->sc_cm_id->device,
					rqstp->rq_arg.pages[page_no],
					page_off, sge_bytes,
					DMA_FROM_DEVICE);
		sge[sge_no].length = sge_bytes;
		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
		/*
		 * Don't bump head->count here because the same page
		 * may be used by multiple SGE.
		 */
		head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
		rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

		byte_count -= sge_bytes;
		ch_bytes -= sge_bytes;
		sge_no++;
		/*
		 * If all bytes for this chunk have been mapped to an
		 * SGE, move to the next SGE
		 */
		if (ch_bytes == 0) {
			ch_sge_ary[ch_no].count =
				sge_no - ch_sge_ary[ch_no].start;
			ch_no++;
			ch++;
			ch_sge_ary[ch_no].start = sge_no;
			ch_bytes = ch->rc_target.rs_length;
			/* If bytes remaining account for next chunk */
			if (byte_count) {
				head->arg.page_len += ch_bytes;
				head->arg.len += ch_bytes;
				head->arg.buflen += ch_bytes;
			}
		}
		/*
		 * If this SGE consumed all of the page, move to the
		 * next page
		 */
		if ((sge_bytes + page_off) == PAGE_SIZE) {
			page_no++;
			page_off = 0;
			/*
			 * If there are still bytes left to map, bump
			 * the page count
			 */
			if (byte_count)
				head->count++;
		} else
			page_off += sge_bytes;
	}
	BUG_ON(byte_count != 0);
	return sge_no;
}

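/*
 * Copy the mapped SGEs for one RDMA_READ into its WR context and advance
 * *sgl_offset by the bytes they cover, so a follow-on read for the same
 * chunk resumes at the correct offset in the client's memory region.
 */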
static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
			      struct ib_sge *sge,
			      u64 *sgl_offset,
			      int count)
{
	int i;

	ctxt->count = count;
	for (i = 0; i < count; i++) {
		ctxt->sge[i].addr = sge[i].addr;
		ctxt->sge[i].length = sge[i].length;
		*sgl_offset = *sgl_offset + sge[i].length;
	}
}

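/*
 * Cap the number of SGEs used in a single RDMA_READ work request: iWARP
 * transports are limited here to one SGE per read WR, everything else to
 * the device's advertised sc_max_sge.
 */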
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
	if ((RDMA_TRANSPORT_IWARP ==
	     rdma_node_get_transport(xprt->sc_cm_id->
				     device->node_type))
	    && sge_count > 1)
		return 1;
	else
		return min_t(int, sge_count, xprt->sc_max_sge);
}

/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base.
 * Each chunk in the array
 * contains the following fields:
 * discrim      - '1', This isn't used for data placement
 * position     - The xdr stream offset (the same for every chunk)
 * handle       - RMR for client memory region
 * length       - data transfer length
 * offset       - 64 bit tagged offset in remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Ne touche pas!
 */
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
			 struct rpcrdma_msg *rmsgp,
			 struct svc_rqst *rqstp,
			 struct svc_rdma_op_ctxt *hdr_ctxt)
{
	struct ib_send_wr read_wr;
	int err = 0;
	int ch_no;
	struct ib_sge *sge;
	int ch_count;
	int byte_count;
	int sge_count;
	u64 sgl_offset;
	struct rpcrdma_read_chunk *ch;
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct svc_rdma_op_ctxt *head;
	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
	struct svc_rdma_op_ctxt *tmp_ch_ctxt;
	struct chunk_sge *ch_sge_ary;

	/* If no read list is present, return 0 */
	ch = svc_rdma_get_read_chunk(rmsgp);
	if (!ch)
		return 0;

	/* Allocate temporary contexts to keep SGE */
	BUG_ON(sizeof(struct ib_sge) < sizeof(struct chunk_sge));
	tmp_sge_ctxt = svc_rdma_get_context(xprt);
	sge = tmp_sge_ctxt->sge;
	tmp_ch_ctxt = svc_rdma_get_context(xprt);
	ch_sge_ary = (struct chunk_sge *)tmp_ch_ctxt->sge;

	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
				    sge, ch_sge_ary,
				    ch_count, byte_count);
	head = svc_rdma_get_context(xprt);
	sgl_offset = 0;
	ch_no = 0;

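	/*
	 * Post one or more RDMA_READ work requests per chunk. The per-WR
	 * contexts are chained through ctxt->next; the last context links
	 * back to hdr_ctxt and head so that I/O completion can find every
	 * context (and the receive buffer) once all the reads finish.
	 */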
	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
	     ch->rc_discrim != 0; ch++, ch_no++) {
next_sge:
		if (!ctxt)
			ctxt = head;
		else {
			ctxt->next = svc_rdma_get_context(xprt);
			ctxt = ctxt->next;
		}
		ctxt->next = NULL;
		ctxt->direction = DMA_FROM_DEVICE;
		clear_bit(RDMACTXT_F_READ_DONE, &ctxt->flags);
		clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

		/* Prepare READ WR */
		memset(&read_wr, 0, sizeof read_wr);
		ctxt->wr_op = IB_WR_RDMA_READ;
		read_wr.wr_id = (unsigned long)ctxt;
		read_wr.opcode = IB_WR_RDMA_READ;
		read_wr.send_flags = IB_SEND_SIGNALED;
		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
		read_wr.wr.rdma.remote_addr =
			get_unaligned(&(ch->rc_target.rs_offset)) +
			sgl_offset;
		read_wr.sg_list = &sge[ch_sge_ary[ch_no].start];
		read_wr.num_sge =
			rdma_read_max_sge(xprt, ch_sge_ary[ch_no].count);
		rdma_set_ctxt_sge(ctxt, &sge[ch_sge_ary[ch_no].start],
				  &sgl_offset,
				  read_wr.num_sge);
		if (((ch+1)->rc_discrim == 0) &&
		    (read_wr.num_sge == ch_sge_ary[ch_no].count)) {
			/*
			 * Mark the last RDMA_READ with a bit to
			 * indicate all RPC data has been fetched from
			 * the client and the RPC needs to be enqueued.
			 */
			set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
			ctxt->next = hdr_ctxt;
			hdr_ctxt->next = head;
		}
		/* Post the read */
		err = svc_rdma_send(xprt, &read_wr);
		if (err) {
			printk(KERN_ERR "svcrdma: Error posting send = %d\n",
			       err);
			/*
			 * Break the circular list so free knows when
			 * to stop if the error happened to occur on
			 * the last read
			 */
			ctxt->next = NULL;
			goto out;
		}
		atomic_inc(&rdma_stat_read);

		if (read_wr.num_sge < ch_sge_ary[ch_no].count) {
			ch_sge_ary[ch_no].count -= read_wr.num_sge;
			ch_sge_ary[ch_no].start += read_wr.num_sge;
			goto next_sge;
		}
		sgl_offset = 0;
		err = 0;
	}
 out:
	svc_rdma_put_context(tmp_sge_ctxt, 0);
	svc_rdma_put_context(tmp_ch_ctxt, 0);

	/* Detach arg pages. svc_recv will replenish them */
	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
		rqstp->rq_pages[ch_no] = NULL;

	/*
	 * Detach res pages. svc_release must see a resused count of
	 * zero or it will attempt to put them.
	 */
	while (rqstp->rq_resused)
		rqstp->rq_respages[--rqstp->rq_resused] = NULL;

	if (err) {
		printk(KERN_ERR "svcrdma : RDMA_READ error = %d\n", err);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		/* Free the linked list of read contexts */
		while (head != NULL) {
			ctxt = head->next;
			svc_rdma_put_context(head, 1);
			head = ctxt;
		}
		return err;
	}

	return 1;
}

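/*
 * Complete a request whose read-list data has arrived. Swap the pages
 * that now hold the client's data into rqstp, rebuild rq_arg from the
 * copy saved in the head context, and free the chained read contexts.
 */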
static int rdma_read_complete(struct svc_rqst *rqstp,
			      struct svc_rdma_op_ctxt *data)
{
	struct svc_rdma_op_ctxt *head = data->next;
	int page_no;
	int ret;

	BUG_ON(!head);

	/* Copy RPC pages */
	for (page_no = 0; page_no < head->count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->pages[page_no];
	}
	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->sge[0].length];
	rqstp->rq_arg.page_len = head->arg.page_len;
	rqstp->rq_arg.page_base = head->arg.page_base;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
	rqstp->rq_resused = 0;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->arg.head[0];
	rqstp->rq_arg.tail[0] = head->arg.tail[0];
	rqstp->rq_arg.len = head->arg.len;
	rqstp->rq_arg.buflen = head->arg.buflen;

	/* XXX: What should this be? */
	rqstp->rq_prot = IPPROTO_MAX;

	/*
	 * Free the contexts we used to build the RDMA_READ. We have
	 * to be careful here because the context list uses the same
	 * next pointer used to chain the contexts associated with the
	 * RDMA_READ
	 */
	data->next = NULL;	/* terminate circular list */
	do {
		data = head->next;
		svc_rdma_put_context(head, 0);
		head = data;
	} while (head != NULL);

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);

	svc_xprt_received(rqstp->rq_xprt);
	return ret;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct rpcrdma_msg *rmsgp;
	int ret = 0;
	int len;

	dprintk("svcrdma: rqstp=%p\n", rqstp);

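	/*
	 * A request whose RDMA_READs have all completed waits on
	 * sc_read_complete_q; finish that deferred request before
	 * looking for new receive completions.
	 */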
	spin_lock_bh(&rdma_xprt->sc_read_complete_lock);
	if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
		ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	}
	spin_unlock_bh(&rdma_xprt->sc_read_complete_lock);
	if (ctxt)
		return rdma_read_complete(rqstp, ctxt);

	spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
		ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
	} else {
		atomic_inc(&rdma_stat_rq_starve);
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		ctxt = NULL;
	}
	spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
	if (!ctxt) {
		/* This is the EAGAIN path. The svc_recv routine will
		 * return -EAGAIN, the nfsd thread will call into
		 * svc_recv again and we shouldn't be on the active
		 * transport list.
		 */
		if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
			goto close_out;

		BUG_ON(ret);
		goto out;
	}
	dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
		ctxt, rdma_xprt, rqstp, ctxt->wc_status);
	BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
	atomic_inc(&rdma_stat_recv);

	/* Build up the XDR from the receive buffers. */
	rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

	/* Decode the RDMA header. */
	len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
	rqstp->rq_xprt_hlen = len;

	/* If the request is invalid, reply with an error */
	if (len < 0) {
		if (len == -ENOSYS)
			(void)svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
		goto close_out;
	}

	/* Read read-list data. */
	ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
	if (ret > 0) {
		/* read-list posted, defer until data received from client. */
		svc_xprt_received(xprt);
		return 0;
	}
	if (ret < 0) {
		/* Post of read-list failed, free context. */
		svc_rdma_put_context(ctxt, 1);
		return 0;
	}

	ret = rqstp->rq_arg.head[0].iov_len
		+ rqstp->rq_arg.page_len
		+ rqstp->rq_arg.tail[0].iov_len;
	svc_rdma_put_context(ctxt, 0);
 out:
	dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
		"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
		ret, rqstp->rq_arg.len,
		rqstp->rq_arg.head[0].iov_base,
		rqstp->rq_arg.head[0].iov_len);
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	svc_xprt_received(xprt);
	return ret;

 close_out:
	if (ctxt)
		svc_rdma_put_context(ctxt, 1);
	dprintk("svcrdma: transport %p is closing\n", xprt);
	/*
	 * Set the close bit and enqueue it. svc_recv will see the
	 * close bit and call svc_xprt_delete
	 */
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	svc_xprt_received(xprt);
	return 0;
}