release/src-rt-6.x.4708/linux/linux-2.6.36/net/sunrpc/xprtrdma/rpc_rdma.c

/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * rpc_rdma.c
 *
 * This file contains the guts of the RPC RDMA protocol, and
 * does marshaling/unmarshaling, etc. It is also where interfacing
 * to the Linux RPC framework lives.
 */

#include "xprt_rdma.h"

#include <linux/highmem.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

enum rpcrdma_chunktype {
	rpcrdma_noch = 0,
	rpcrdma_readch,
	rpcrdma_areadch,
	rpcrdma_writech,
	rpcrdma_replych
};

#ifdef RPC_DEBUG
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif

/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * Note, this routine is never called if the connection's memory
 * registration strategy is 0 (bounce buffers).
 */
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;

	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->head[0].iov_base;
		seg[n].mr_len = xdrbuf->head[0].iov_len;
		++n;
	}

	if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
		if (n == nsegs)
			return 0;
		seg[n].mr_page = xdrbuf->pages[0];
		seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
		seg[n].mr_len = min_t(u32,
			PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
		len = xdrbuf->page_len - seg[n].mr_len;
		++n;
		p = 1;
		while (len > 0) {
			if (n == nsegs)
				return 0;
			seg[n].mr_page = xdrbuf->pages[p];
			seg[n].mr_offset = NULL;
			seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
			len -= seg[n].mr_len;
			++n;
			++p;
		}
	}

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		if (n == nsegs)
			return 0;
		seg[n].mr_page = NULL;
		seg[n].mr_offset = xdrbuf->tail[0].iov_base;
		seg[n].mr_len = xdrbuf->tail[0].iov_len;
		++n;
	}

	return n;
}
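
/*
 * Worked example (illustrative only, not part of the upstream file):
 * suppose the xdr_buf holds a 120-byte RPC header in head[0], 5002 bytes
 * of page data starting at page_base 0, and a 2-byte XDR pad in tail[0],
 * and the caller passes pos == 120 (the head length). With
 * PAGE_SIZE == 4096 the routine above would fill in:
 *
 *	seg[0]: mr_page = pages[0], mr_offset = 0,    mr_len = 4096
 *	seg[1]: mr_page = pages[1], mr_offset = NULL, mr_len = 906
 *
 * The head is skipped because pos != 0, and the short (< 4 byte) tail is
 * dropped when xprt_rdma_pad_optimize is set, since the peer regenerates
 * the XDR pad itself. All sizes here are hypothetical.
 */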

/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 *  Read chunklist (a linked list):
 *   N elements, position P (same P for all chunks of same arg!):
 *    1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 *  Write chunklist (a list of (one) counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 *  Reply chunk (a counted array):
 *   N elements:
 *    1 - N - HLOO - HLOO - ... - HLOO
 */
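
/*
 * Illustrative encoding (hypothetical values, not from the upstream
 * file): a request whose argument lands in a single read chunk at XDR
 * position 136, with rkey 0x1234, length 8192 and some 64-bit offset O,
 * leaves these 32-bit words after the fixed RPC/RDMA header:
 *
 *	1, 136, 0x1234, 8192, O(hi), O(lo)	read list entry (PHLOO)
 *	0					end of read chunk list
 *	0					NULL write chunk list
 *	0					NULL reply chunk
 *
 * A reply chunk of N segments instead begins with 0 (NULL read list),
 * 0 (NULL write list), then 1, N and N HLOO quads with no trailing
 * terminator, matching the counted-array form above.
 */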
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);
	BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
	       && (nchunks > 3));

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}

/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	/* header now contains entire send message */
	return pad;
}
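
/*
 * Illustrative example (hypothetical sizes): if rq_svec[0] holds a
 * 148-byte RPC header and the send buffer carries 1000 bytes of page
 * data and no tail, the loop above copies those 1000 bytes to the area
 * immediately following the header, leaving rq_svec[0].iov_len == 1148
 * so the whole message can go out as a single inline send. The return
 * value is the amount of optional alignment padding to insert; it is
 * forced to zero whenever padding does not apply.
 */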

/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC: %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
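
/*
 * Resulting send layout (illustrative summary): for a small request
 * pulled up inline, two SGEs are posted -- rl_send_iov[0] covers the
 * 28-byte RPC/RDMA header built in rl_base, and rl_send_iov[1] covers
 * the RPC call message, including any pulled-up page/tail data. In the
 * padded RDMA_MSGP case two more SGEs follow: the shared zeroed pad
 * buffer, then the write data left in place after the RPC header. This
 * mirrors the iov map in the comment above rpcrdma_marshal_req().
 */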

/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;

	i = ntohl(**iptrp);	/* get array count */
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		total_len += ntohl(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
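
/*
 * Illustrative walk (hypothetical values): a read reply carrying a
 * two-segment write chunk of 4096 and 512 bytes arrives with its write
 * list encoded as 1, 2, HLOO, HLOO, 0. Called with wrchunk set and
 * *iptrp pointing at the element count (2), the routine above sums the
 * two rs_length fields, verifies the terminating zero, advances *iptrp
 * past it, and returns 4608.
 */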

/*
 * Scatter inline received data back into provided iov's.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
						KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}

/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);
	if (++xprt->connect_cookie == 0)	/* maintain a reserved value */
		++xprt->connect_cookie;
	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else {
		if (xprt_test_and_clear_connected(xprt))
			xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * This function is called when a memory window unbind we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}

/*
 * Called as a tasklet to do req/reply match and complete a request.
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}