/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY        RPCDBG_SVCXPRT
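
/*
 * Receive path overview (summary):
 *
 * svc_rdma_recvfrom() dequeues a completed RDMA_RECV context, maps the
 * received pages into rq_arg with rdma_build_arg_xdr(), and decodes the
 * RPC-over-RDMA header.  If the header carries a read chunk list,
 * rdma_read_xdr() posts RDMA_READ work requests to pull the bulk data
 * from the client and the request is deferred; when the read completions
 * arrive, the transport is re-enqueued and rdma_read_complete() rebuilds
 * rq_arg so the RPC can be processed normally.
 */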
/*
 * Replace the pages in the rq_argpages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
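/*
 * For example (illustrative numbers): if the device delivered 6000 bytes
 * and sge[0] covers one 4096-byte page, head[0].iov_len becomes 4096 and
 * the remaining 1904 bytes are described by the page list starting at
 * rq_pages[1].
 */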
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt,
                               u32 byte_count)
{
        struct page *page;
        u32 bc;
        int sge_no;

        /* Swap the page in the SGE with the page in argpages */
        page = ctxt->pages[0];
        put_page(rqstp->rq_pages[0]);
        rqstp->rq_pages[0] = page;

        /* Set up the XDR head */
        rqstp->rq_arg.head[0].iov_base = page_address(page);
        rqstp->rq_arg.head[0].iov_len = min(byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;

        /* Compute bytes past head in the SGL */
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        /* If data remains, store it in the pagelist */
        rqstp->rq_arg.page_len = bc;
        rqstp->rq_arg.page_base = 0;
        rqstp->rq_arg.pages = &rqstp->rq_pages[1];
        sge_no = 1;
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                put_page(rqstp->rq_pages[sge_no]);
                rqstp->rq_pages[sge_no] = page;
                bc -= min(bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
                sge_no++;
        }
        rqstp->rq_respages = &rqstp->rq_pages[sge_no];

        /* We should never run out of SGE because the limit is defined to
         * support the max allowed RPC data length
         */
        BUG_ON(bc && (sge_no == ctxt->count));
        BUG_ON((rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len)
               != byte_count);
        BUG_ON(rqstp->rq_arg.len != byte_count);

        /* If not all pages were used from the SGL, free the remaining ones */
        bc = sge_no;
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];
                put_page(page);
        }
        ctxt->count = bc;

        /* Set up tail */
        rqstp->rq_arg.tail[0].iov_base = NULL;
        rqstp->rq_arg.tail[0].iov_len = 0;
}
/* Encode a read-chunk-list as an array of IB SGE
 *
 * Assumptions:
 * - chunk[0]->position points to pages[0] at an offset of 0
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
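/*
 * Example (illustrative): a single 10000-byte chunk starting at page
 * offset 0 is split into three kvec entries in rpl_map->sge[] of 4096,
 * 4096 and 1808 bytes, and chl_map->ch[0] records start = 0, count = 3.
 */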
static int map_read_chunks(struct svcxprt_rdma *xprt,
                           struct svc_rqst *rqstp,
                           struct svc_rdma_op_ctxt *head,
                           struct rpcrdma_msg *rmsgp,
                           struct svc_rdma_req_map *rpl_map,
                           struct svc_rdma_req_map *chl_map,
                           int ch_count,
                           int byte_count)
{
        int sge_no;
        int sge_bytes;
        int page_off;
        int page_no;
        int ch_bytes;
        int ch_no;
        struct rpcrdma_read_chunk *ch;

        sge_no = 0;
        page_no = 0;
        page_off = 0;
        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
        ch_no = 0;
        ch_bytes = ntohl(ch->rc_target.rs_length);
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.pages = &head->pages[head->count];
        head->hdr_count = head->count; /* save count of hdr pages */
        head->arg.page_base = 0;
        head->arg.page_len = ch_bytes;
        head->arg.len = rqstp->rq_arg.len + ch_bytes;
        head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
        head->count++;
        chl_map->ch[0].start = 0;
        while (byte_count) {
                rpl_map->sge[sge_no].iov_base =
                        page_address(rqstp->rq_arg.pages[page_no]) + page_off;
                sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
                rpl_map->sge[sge_no].iov_len = sge_bytes;
                /*
                 * Don't bump head->count here because the same page
                 * may be used by multiple SGE.
                 */
                head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
                rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];

                byte_count -= sge_bytes;
                ch_bytes -= sge_bytes;
                sge_no++;
                /*
                 * If all bytes for this chunk have been mapped to an
                 * SGE, move to the next SGE
                 */
                if (ch_bytes == 0) {
                        chl_map->ch[ch_no].count =
                                sge_no - chl_map->ch[ch_no].start;
                        ch_no++;
                        ch++;
                        chl_map->ch[ch_no].start = sge_no;
                        ch_bytes = ntohl(ch->rc_target.rs_length);
                        /* If bytes remaining account for next chunk */
                        if (byte_count) {
                                head->arg.page_len += ch_bytes;
                                head->arg.len += ch_bytes;
                                head->arg.buflen += ch_bytes;
                        }
                }
                /*
                 * If this SGE consumed all of the page, move to the
                 * next page
                 */
                if ((sge_bytes + page_off) == PAGE_SIZE) {
                        page_no++;
                        page_off = 0;
                        /*
                         * If there are still bytes left to map, bump
                         * the page count
                         */
                        if (byte_count)
                                head->count++;
                } else
                        page_off += sge_bytes;
        }
        BUG_ON(byte_count != 0);
        return sge_no;
}
/* Map a read-chunk-list to an XDR and fast register the page-list.
 *
 * Assumptions:
 * - chunk[0]	position points to pages[0] at an offset of 0
 * - pages[]	will be made physically contiguous by creating a one-off memory
 *		region using the fastreg verb.
 * - byte_count is # of bytes in read-chunk-list
 * - ch_count	is # of chunks in read-chunk-list
 *
 * Output:
 * - sge array pointing into pages[] array.
 * - chunk_sge array specifying sge index and count for each
 *   chunk in the read list
 */
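/*
 * Note: unlike map_read_chunks(), the entire read-list payload is mapped
 * as one fast-registered memory region, so each chunk is described by a
 * single kvec pointing at frmr->kva + offset and chl_map->ch[n].count is
 * always 1.
 */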
static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
                                struct svc_rqst *rqstp,
                                struct svc_rdma_op_ctxt *head,
                                struct rpcrdma_msg *rmsgp,
                                struct svc_rdma_req_map *rpl_map,
                                struct svc_rdma_req_map *chl_map,
                                int ch_count,
                                int byte_count)
{
        int page_no;
        int ch_no;
        u32 offset;
        struct rpcrdma_read_chunk *ch;
        struct svc_rdma_fastreg_mr *frmr;
        int ret = 0;

        frmr = svc_rdma_get_frmr(xprt);
        if (IS_ERR(frmr))
                return -ENOMEM;

        head->frmr = frmr;
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->arg.pages = &head->pages[head->count];
        head->hdr_count = head->count; /* save count of hdr pages */
        head->arg.page_base = 0;
        head->arg.page_len = byte_count;
        head->arg.len = rqstp->rq_arg.len + byte_count;
        head->arg.buflen = rqstp->rq_arg.buflen + byte_count;

        /* Fast register the page list */
        frmr->kva = page_address(rqstp->rq_arg.pages[0]);
        frmr->direction = DMA_FROM_DEVICE;
        frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
        frmr->map_len = byte_count;
        frmr->page_list_len = PAGE_ALIGN(byte_count) >> PAGE_SHIFT;
        for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        rqstp->rq_arg.pages[page_no], 0,
                                        PAGE_SIZE, DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                atomic_inc(&xprt->sc_dma_used);
                head->arg.pages[page_no] = rqstp->rq_arg.pages[page_no];
        }
        head->count += page_no;

        /* rq_respages points one past arg pages */
        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];

        /* Create the reply and chunk maps */
        offset = 0;
        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
        for (ch_no = 0; ch_no < ch_count; ch_no++) {
                int len = ntohl(ch->rc_target.rs_length);
                rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
                rpl_map->sge[ch_no].iov_len = len;
                chl_map->ch[ch_no].count = 1;
                chl_map->ch[ch_no].start = ch_no;
                offset += len;
                ch++;
        }

        ret = svc_rdma_fastreg(xprt, frmr);
        if (ret)
                goto fatal_err;

        return ch_no;

 fatal_err:
        printk("svcrdma: error fast registering xdr for xprt %p", xprt);
        svc_rdma_put_frmr(xprt, frmr);
        return -EIO;
}
static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
                             struct svc_rdma_op_ctxt *ctxt,
                             struct svc_rdma_fastreg_mr *frmr,
                             struct kvec *vec,
                             u64 *sgl_offset,
                             int count)
{
        int i;
        unsigned long off;

        ctxt->count = count;
        ctxt->direction = DMA_FROM_DEVICE;
        for (i = 0; i < count; i++) {
                ctxt->sge[i].length = 0; /* in case map fails */
                if (!frmr) {
                        BUG_ON(!virt_to_page(vec[i].iov_base));
                        off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
                        ctxt->sge[i].addr =
                                ib_dma_map_page(xprt->sc_cm_id->device,
                                                virt_to_page(vec[i].iov_base),
                                                off,
                                                vec[i].iov_len,
                                                DMA_FROM_DEVICE);
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 ctxt->sge[i].addr))
                                return -EINVAL;
                        ctxt->sge[i].lkey = xprt->sc_dma_lkey;
                        atomic_inc(&xprt->sc_dma_used);
                } else {
                        ctxt->sge[i].addr = (unsigned long)vec[i].iov_base;
                        ctxt->sge[i].lkey = frmr->mr->lkey;
                }
                ctxt->sge[i].length = vec[i].iov_len;
                *sgl_offset = *sgl_offset + vec[i].iov_len;
        }
        return 0;
}
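
/*
 * iWARP devices scatter each RDMA_READ response into a single local
 * buffer, so on iWARP transports an RDMA_READ work request may carry
 * only one SGE; IB devices can use up to sc_max_sge.
 */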
static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
{
        if ((rdma_node_get_transport(xprt->sc_cm_id->device->node_type) ==
             RDMA_TRANSPORT_IWARP) &&
            sge_count > 1)
                return 1;
        else
                return min_t(int, sge_count, xprt->sc_max_sge);
}
/*
 * Use RDMA_READ to read data from the advertised client buffer into the
 * XDR stream starting at rq_arg.head[0].iov_base.
 * Each chunk in the array
 * contains the following fields:
 * discrim      - '1', This isn't used for data placement
 * position     - The xdr stream offset (the same for every chunk)
 * handle       - RMR for client memory region
 * length       - data transfer length
 * offset       - 64 bit tagged offset in remote memory region
 *
 * On our side, we need to read into a pagelist. The first page immediately
 * follows the RPC header.
 *
 * This function returns:
 * 0 - No error and no read-list found.
 *
 * 1 - Successful read-list processing. The data is not yet in
 * the pagelist and therefore the RPC request must be deferred. The
 * I/O completion will enqueue the transport again and
 * svc_rdma_recvfrom will complete the request.
 *
 * <0 - Error processing/posting read-list.
 *
 * NOTE: The ctxt must not be touched after the last WR has been posted
 * because the I/O completion processing may occur on another
 * processor and free / modify the context. Ne touche pas!
 */
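/*
 * When a fast-registered MR maps the data sink, the last RDMA_READ must
 * also invalidate that MR: either by using RDMA_READ_WITH_INV when the
 * device supports read-with-invalidate, or by chaining a LOCAL_INV work
 * request behind the read.
 */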
static int rdma_read_xdr(struct svcxprt_rdma *xprt,
                         struct rpcrdma_msg *rmsgp,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *hdr_ctxt)
{
        struct ib_send_wr read_wr;
        struct ib_send_wr inv_wr;
        int err = 0;
        int ch_no;
        int ch_count;
        int byte_count;
        int sge_count;
        u64 sgl_offset;
        struct rpcrdma_read_chunk *ch;
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct svc_rdma_req_map *rpl_map;
        struct svc_rdma_req_map *chl_map;

        /* If no read list is present, return 0 */
        ch = svc_rdma_get_read_chunk(rmsgp);
        if (!ch)
                return 0;

        svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
        if (ch_count > RPCSVC_MAXPAGES)
                return -EINVAL;

        /* Allocate temporary reply and chunk maps */
        rpl_map = svc_rdma_get_req_map();
        chl_map = svc_rdma_get_req_map();

        if (!xprt->sc_frmr_pg_list_len)
                sge_count = map_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                            rpl_map, chl_map, ch_count,
                                            byte_count);
        else
                sge_count = fast_reg_read_chunks(xprt, rqstp, hdr_ctxt, rmsgp,
                                                 rpl_map, chl_map, ch_count,
                                                 byte_count);
        if (sge_count < 0) {
                err = -EIO;
                goto out;
        }

        sgl_offset = 0;
        ch_no = 0;
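
        /*
         * Post one RDMA_READ per chunk.  If a chunk needs more SGEs than
         * a single work request may carry, additional reads are posted
         * for the same chunk via the next_sge label below.
         */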
        for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
             ch->rc_discrim != 0; ch++, ch_no++) {
                u64 rs_offset;
next_sge:
                ctxt = svc_rdma_get_context(xprt);
                ctxt->direction = DMA_FROM_DEVICE;
                ctxt->frmr = hdr_ctxt->frmr;
                ctxt->read_hdr = NULL;
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
                clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

                /* Prepare READ WR */
                memset(&read_wr, 0, sizeof read_wr);
                read_wr.wr_id = (unsigned long)ctxt;
                read_wr.opcode = IB_WR_RDMA_READ;
                ctxt->wr_op = read_wr.opcode;
                read_wr.send_flags = IB_SEND_SIGNALED;
                read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
                xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
                                 &rs_offset);
                read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
                read_wr.sg_list = ctxt->sge;
                read_wr.num_sge =
                        rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
                err = rdma_set_ctxt_sge(xprt, ctxt, hdr_ctxt->frmr,
                                        &rpl_map->sge[chl_map->ch[ch_no].start],
                                        &sgl_offset,
                                        read_wr.num_sge);
                if (err) {
                        svc_rdma_unmap_dma(ctxt);
                        svc_rdma_put_context(ctxt, 0);
                        goto out;
                }
                if (((ch+1)->rc_discrim == 0) &&
                    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
                        /*
                         * Mark the last RDMA_READ with a bit to
                         * indicate all RPC data has been fetched from
                         * the client and the RPC needs to be enqueued.
                         */
                        set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
                        if (hdr_ctxt->frmr) {
                                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
                                /*
                                 * Invalidate the local MR used to map the data
                                 * sink.
                                 */
                                if (xprt->sc_dev_caps &
                                    SVCRDMA_DEVCAP_READ_W_INV) {
                                        read_wr.opcode =
                                                IB_WR_RDMA_READ_WITH_INV;
                                        ctxt->wr_op = read_wr.opcode;
                                        read_wr.ex.invalidate_rkey =
                                                ctxt->frmr->mr->lkey;
                                } else {
                                        /* Prepare INVALIDATE WR */
                                        memset(&inv_wr, 0, sizeof inv_wr);
                                        inv_wr.opcode = IB_WR_LOCAL_INV;
                                        inv_wr.send_flags = IB_SEND_SIGNALED;
                                        inv_wr.ex.invalidate_rkey =
                                                hdr_ctxt->frmr->mr->lkey;
                                        read_wr.next = &inv_wr;
                                }
                        }
                        ctxt->read_hdr = hdr_ctxt;
                }
                /* Post the read */
                err = svc_rdma_send(xprt, &read_wr);
                if (err) {
                        printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
                               err);
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        svc_rdma_unmap_dma(ctxt);
                        svc_rdma_put_context(ctxt, 0);
                        goto out;
                }
                atomic_inc(&rdma_stat_read);

                if (read_wr.num_sge < chl_map->ch[ch_no].count) {
                        chl_map->ch[ch_no].count -= read_wr.num_sge;
                        chl_map->ch[ch_no].start += read_wr.num_sge;
                        goto next_sge;
                }
                sgl_offset = 0;
                err = 1;
        }

 out:
        svc_rdma_put_req_map(rpl_map);
        svc_rdma_put_req_map(chl_map);

        /* Detach arg pages. svc_recv will replenish them */
        for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
                rqstp->rq_pages[ch_no] = NULL;

        /*
         * Detach res pages. If svc_release sees any it will attempt to
         * put them.
         */
        while (rqstp->rq_next_page != rqstp->rq_respages)
                *(--rqstp->rq_next_page) = NULL;

        return err;
}
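
/*
 * Complete a previously deferred request once its RDMA_READ work
 * requests have finished: the pages and rq_arg layout saved in the
 * read header context are copied back into the svc_rqst.
 */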
static int rdma_read_complete(struct svc_rqst *rqstp,
                              struct svc_rdma_op_ctxt *head)
{
        int page_no;
        int ret;

        BUG_ON(!head);

        /* Copy RPC pages */
        for (page_no = 0; page_no < head->count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->pages[page_no];
        }
        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no];
        rqstp->rq_next_page = &rqstp->rq_arg.pages[page_no];

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];
        rqstp->rq_arg.len = head->arg.len;
        rqstp->rq_arg.buflen = head->arg.buflen;

        /* Free the context */
        svc_rdma_put_context(head, 0);

        /* XXX: What should this be? */
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        dprintk("svcrdma: deferred read ret=%d, rq_arg.len =%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);

        return ret;
}
/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ.
 */
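/*
 * Three cases are handled below: a context on sc_read_complete_q means a
 * deferred request has finished its RDMA_READs and is completed via
 * rdma_read_complete(); a context on sc_rq_dto_q is a newly received
 * request; an empty queue means there is nothing to do and svc_recv()
 * will return -EAGAIN.
 */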
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;
        int len;

        dprintk("svcrdma: rqstp=%p\n", rqstp);

        spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
                ctxt = list_entry(rdma_xprt->sc_read_complete_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        }
        if (ctxt) {
                spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
                return rdma_read_complete(rqstp, ctxt);
        }

        if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
                                  struct svc_rdma_op_ctxt,
                                  dto_q);
                list_del_init(&ctxt->dto_q);
        } else {
                atomic_inc(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
        spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
        if (!ctxt) {
                /* This is the EAGAIN path. The svc_recv routine will
                 * return -EAGAIN, the nfsd thread will go to call into
                 * svc_recv again and we shouldn't be on the active
                 * transport list
                 */
                if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
                        goto close_out;

                goto out;
        }
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
                ctxt, rdma_xprt, rqstp, ctxt->wc_status);
        BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
        atomic_inc(&rdma_stat_recv);

        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

        /* Decode the RDMA header. */
        len = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
        rqstp->rq_xprt_hlen = len;

        /* If the request is invalid, reply with an error */
        if (len < 0) {
                if (len == -ENOSYS)
                        svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
                goto close_out;
        }

        /* Read read-list data. */
        ret = rdma_read_xdr(rdma_xprt, rmsgp, rqstp, ctxt);
        if (ret > 0) {
                /* read-list posted, defer until data received from client. */
                goto defer;
        }
        if (ret < 0) {
                /* Post of read-list failed, free context. */
                svc_rdma_put_context(ctxt, 1);
                return 0;
        }

        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        svc_rdma_put_context(ctxt, 0);
 out:
        dprintk("svcrdma: ret = %d, rq_arg.len =%d, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len = %zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return ret;

 close_out:
        if (ctxt)
                svc_rdma_put_context(ctxt, 1);
        dprintk("svcrdma: transport %p is closing\n", xprt);
        /*
         * Set the close bit and enqueue it. svc_recv will see the
         * close bit and call svc_xprt_delete
         */
        set_bit(XPT_CLOSE, &xprt->xpt_flags);
 defer:
        return 0;
}