/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
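/* For example (illustrative numbers only): with 4096-byte pages, a reply
 * carrying 32 KB of page data needs 32768 / 4096 = 8 SGEs for pages[] by
 * the formula above, plus one each for head[0], tail[0], and the RPCRDMA
 * header, i.e. 11 SGEs in total.
 */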
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
                        struct xdr_buf *xdr,
                        struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no = 0;
        u8 *frva;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = svc_rdma_get_frmr(xprt);
        if (IS_ERR(frmr))
                return -ENOMEM;
        vec->frmr = frmr;

        /* Skip the RPCRDMA header */
        sge_no = 1;

        /* Map the head. */
        frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        vec->count = 2;
        sge_no++;

        /* Map the XDR head */
        frmr->kva = frva;
        frmr->direction = DMA_TO_DEVICE;
        frmr->access_flags = 0;
        frmr->map_len = PAGE_SIZE;
        frmr->page_list_len = 1;
        page_off = (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
        frmr->page_list->page_list[page_no] =
                ib_dma_map_page(xprt->sc_cm_id->device,
                                virt_to_page(xdr->head[0].iov_base),
                                page_off,
                                PAGE_SIZE - page_off,
                                DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                 frmr->page_list->page_list[page_no]))
                goto fatal_err;
        atomic_inc(&xprt->sc_dma_used);

        /* Map the XDR page list */
        page_off = xdr->page_base;
        page_bytes = xdr->page_len + page_off;
        if (!page_bytes)
                goto encode_tail;

        /* Map the pages */
        vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
        vec->sge[sge_no].iov_len = page_bytes;
        sge_no++;
        vec->count++;

        while (page_bytes) {
                struct page *page;

                page = xdr->pages[page_no++];
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        page, page_off,
                                        sge_bytes, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;

                atomic_inc(&xprt->sc_dma_used);
                page_off = 0; /* reset for next time through loop */
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }

 encode_tail:
        /* Map tail */
        if (0 == xdr->tail[0].iov_len)
                goto done;

        vec->count++;
        vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

        if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
            ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
                /*
                 * If head and tail use the same page, we don't need
                 * to map it again.
                 */
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
        } else {
                void *va;

                /* Map another page for the tail */
                page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
                va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
                vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device, virt_to_page(va),
                                        page_off,
                                        PAGE_SIZE,
                                        DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                atomic_inc(&xprt->sc_dma_used);
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }

 done:
        if (svc_rdma_fastreg(xprt, frmr))
                goto fatal_err;

        return 0;

 fatal_err:
        printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
        vec->frmr = NULL;
        svc_rdma_put_frmr(xprt, frmr);
        return -EIO;
}
static int map_xdr(struct svcxprt_rdma *xprt,
                   struct xdr_buf *xdr,
                   struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        if (xprt->sc_frmr_pg_list_len)
                return fast_reg_xdr(xprt, xdr, vec);

        /* Skip the first sge, this is for the RPCRDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);

        vec->count = sge_no;
        return 0;
}
static dma_addr_t dma_map_xdr(struct svcxprt_rdma *xprt,
                              struct xdr_buf *xdr,
                              u32 xdr_off, size_t len, int dir)
{
        struct page *page;
        dma_addr_t dma_addr;

        if (xdr_off < xdr->head[0].iov_len) {
                /* This offset is in the head */
                xdr_off += (unsigned long)xdr->head[0].iov_base & ~PAGE_MASK;
                page = virt_to_page(xdr->head[0].iov_base);
        } else {
                xdr_off -= xdr->head[0].iov_len;
                if (xdr_off < xdr->page_len) {
                        /* This offset is in the page list */
                        page = xdr->pages[xdr_off >> PAGE_SHIFT];
                        xdr_off &= ~PAGE_MASK;
                } else {
                        /* This offset is in the tail */
                        xdr_off -= xdr->page_len;
                        xdr_off += (unsigned long)
                                xdr->tail[0].iov_base & ~PAGE_MASK;
                        page = virt_to_page(xdr->tail[0].iov_base);
                }
        }
        dma_addr = ib_dma_map_page(xprt->sc_cm_id->device, page, xdr_off,
                                   min_t(size_t, PAGE_SIZE, len), dir);
        return dma_addr;
}
/* Assumptions:
 * - We are using FRMR, or
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
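/* For example (illustrative only): a device limited to 32 SGEs with
 * 4096-byte pages caps each RDMA_WRITE at 32 * 4096 = 128 KB in the
 * non-FRMR case; the chunk loops in send_write_chunks() and
 * send_reply_chunks() issue multiple writes when a chunk is larger.
 */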
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        BUG_ON(vec->count > RPCSVC_MAXPAGES);
        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len - sge_off);
                sge[sge_no].length = sge_bytes;
                if (!vec->frmr) {
                        sge[sge_no].addr =
                                dma_map_xdr(xprt, &rqstp->rq_res, xdr_off,
                                            sge_bytes, DMA_TO_DEVICE);
                        xdr_off += sge_bytes;
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 sge[sge_no].addr))
                                goto err;
                        atomic_inc(&xprt->sc_dma_used);
                        sge[sge_no].lkey = xprt->sc_dma_lkey;
                } else {
                        sge[sge_no].addr = (unsigned long)
                                vec->sge[xdr_sge_no].iov_base + sge_off;
                        sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->count++;
                ctxt->frmr = vec->frmr;
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                BUG_ON(xdr_sge_no > vec->count);
                bc -= sge_bytes;
        }

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof write_wr);
        ctxt->wr_op = IB_WR_RDMA_WRITE;
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return 0;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_frmr(xprt, vec->frmr);
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
}
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* Write chunks start at the pagelist */
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, arg_ch->rs_length);

                /* Prepare the response chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(arg_ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                arg_ch->rs_handle,
                                                rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         arg_ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;
        /* XXX: need to fix when reply lists occur with read-list and/or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* xdr offset starts at RPC message */
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                u64 rs_offset;

                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ch->rs_length);

                /* Prepare the reply chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                                ch->rs_handle, rs_offset,
                                                write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;
}
/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0], the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' field indicates how much of
 * the XDR to include in this RDMA_SEND. NB: The offset of the payload
 * to send is zero in the XDR.
 */
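/* svc_rdma_sendto() below chooses the header type accordingly: RDMA_NOMSG
 * when the client supplied a reply array (the RPC reply itself moves via
 * RDMA), otherwise RDMA_MSG for the inline and write-chunk cases.
 */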
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
        struct ib_send_wr send_wr;
        struct ib_send_wr inv_wr;
        int sge_no;
        int sge_bytes;
        int page_no;
        int xdr_off;
        int ret;

        /* Post a recv buffer to handle another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d."
                       "Closing transport %p.\n", ret, rdma);
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_put_frmr(rdma, vec->frmr);
                svc_rdma_put_context(ctxt, 0);
                return -ENOTCONN;
        }

        /* Prepare the context */
        ctxt->pages[0] = page;
        ctxt->count = 1;
        ctxt->frmr = vec->frmr;
        if (vec->frmr)
                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].lkey = rdma->sc_dma_lkey;
        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device, page, 0,
                                ctxt->sge[0].length, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;
        atomic_inc(&rdma->sc_dma_used);

        ctxt->direction = DMA_TO_DEVICE;

        /* Map the payload indicated by 'byte_count' */
        xdr_off = 0;
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                if (!vec->frmr) {
                        ctxt->sge[sge_no].addr =
                                dma_map_xdr(rdma, &rqstp->rq_res, xdr_off,
                                            sge_bytes, DMA_TO_DEVICE);
                        xdr_off += sge_bytes;
                        if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                                 ctxt->sge[sge_no].addr))
                                goto err;
                        atomic_inc(&rdma->sc_dma_used);
                        ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
                } else {
                        ctxt->sge[sge_no].addr = (unsigned long)
                                vec->sge[sge_no].iov_base;
                        ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->sge[sge_no].length = sge_bytes;
        }
        BUG_ON(byte_count != 0);

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /*
                 * If there are more pages than SGE, terminate SGE
                 * list so that svc_rdma_unmap_dma doesn't attempt to
                 * unmap garbage.
                 */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }
        BUG_ON(sge_no > rdma->sc_max_sge);
        memset(&send_wr, 0, sizeof send_wr);
        ctxt->wr_op = IB_WR_SEND;
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;
        if (vec->frmr) {
                /* Prepare INVALIDATE WR */
                memset(&inv_wr, 0, sizeof inv_wr);
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED;
                inv_wr.ex.invalidate_rkey = vec->frmr->mr->lkey;
                send_wr.next = &inv_wr;
        }

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                goto err;

        return 0;

 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_frmr(rdma, vec->frmr);
        svc_rdma_put_context(ctxt, 1);
        return -EIO;
}
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}
/*
 * Return the start of an xdr buffer.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *reply_ary;
        enum rpcrdma_proc reply_type;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build an req vec for the XDR */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map();
        ret = map_xdr(rdma, &rqstp->rq_res, vec);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header */
        res_page = svc_rdma_get_page();
        rdma_resp = page_address(res_page);
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build resp write-list */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        /* Send any reply-list data and update resp reply-list */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        svc_rdma_put_req_map(vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;

 err1:
        put_page(res_page);
 err0:
        svc_rdma_put_req_map(vec);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}