/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);

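/*
 * Illustrative sketch, not part of the original file: round-tripping a
 * netobj through the two helpers above. The buffer size and the function
 * name are assumptions made for this example only.
 */
static inline int xdr_netobj_roundtrip_example(void)
{
	__be32 buf[4];		/* room for the length word plus padded data */
	u8 payload[5] = { 'h', 'e', 'l', 'l', 'o' };
	struct xdr_netobj in = { .len = sizeof(payload), .data = payload };
	struct xdr_netobj out;

	xdr_encode_netobj(buf, &in);	/* length, data, zero padding */
	if (xdr_decode_netobj(buf, &out) == NULL)
		return -EINVAL;		/* length exceeded XDR_MAX_NETOBJ */
	return out.len == in.len ? 0 : -EIO;
}
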
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);

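/*
 * Illustrative sketch, not part of the original file: the padding
 * arithmetic above in action. Encoding 5 bytes consumes
 * XDR_QUADLEN(5) = 2 words: the 5 data bytes plus 3 zeroed pad bytes.
 * All names here are assumptions for this example only.
 */
static inline void xdr_opaque_fixed_example(void)
{
	__be32 buf[2];
	const char data[5] = { 'a', 'b', 'c', 'd', 'e' };
	__be32 *next = xdr_encode_opaque_fixed(buf, data, sizeof(data));

	/* next == buf + 2, and bytes 5..7 of buf now hold zeroes */
	(void)next;
}
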
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);

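/*
 * Illustrative sketch, not part of the original file: an XDR string uses
 * the same wire format as variable-length opaque data, so "nfs" occupies
 * two words: 0x00000003, then 'n' 'f' 's' followed by one zero pad byte.
 */
static inline __be32 *xdr_string_example(__be32 *p)
{
	return xdr_encode_string(p, "nfs");
}
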
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *       if a memory area starts at byte 'base' in page 'pages[i]',
 *       then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *       they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
			size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}

/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}

/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON(len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON(len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);

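/*
 * Illustrative sketch, not part of the original file: a minimal encode-side
 * setup, assuming the caller owns a flat scratch buffer. Passing p == NULL
 * leaves the stream positioned at the start of the head kvec. The helper
 * name and the bare-buffer convention are assumptions for this example.
 */
static inline void xdr_init_encode_example(struct xdr_stream *xdr,
					   struct xdr_buf *buf,
					   void *scratch, size_t buflen)
{
	memset(buf, 0, sizeof(*buf));
	buf->head[0].iov_base = scratch;
	buf->head[0].iov_len = 0;	/* nothing encoded yet */
	buf->buflen = buflen;		/* total scratch space available */
	xdr_init_encode(xdr, buf, NULL);
}
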
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);

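/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * encoding one 32-bit word through the stream. A NULL return from
 * xdr_reserve_space() means the scratch buffer is exhausted.
 */
static inline int xdr_encode_u32_example(struct xdr_stream *xdr, u32 value)
{
	__be32 *p = xdr_reserve_space(xdr, sizeof(value));

	if (p == NULL)
		return -EMSGSIZE;	/* no room left before xdr->end */
	*p = cpu_to_be32(value);
	return 0;
}
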
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);

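/*
 * Illustrative sketch, not part of the original file: decoding a fixed
 * two-word header with the helper above. A NULL return means the words are
 * not available inline and the message is too short.
 */
static inline int xdr_decode_two_words_example(struct xdr_stream *xdr,
					       u32 *a, u32 *b)
{
	__be32 *p = xdr_inline_decode(xdr, 2 * sizeof(u32));

	if (p == NULL)
		return -EIO;
	*a = be32_to_cpu(*p++);
	*b = be32_to_cpu(*p);
	return 0;
}
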
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char * kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);

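/*
 * Illustrative sketch, not part of the original file: wrapping a flat
 * buffer in an xdr_buf so the subsegment/read/write helpers below can
 * operate on it. The function name is an assumption for this example.
 */
static inline void xdr_buf_from_flat_example(struct xdr_buf *buf,
					     void *data, size_t len)
{
	struct kvec iov = { .iov_base = data, .iov_len = len };

	xdr_buf_from_iov(&iov, buf);	/* head = iov; no pages, no tail */
}
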
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);

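/*
 * Illustrative sketch, not part of the original file: carving a window out
 * of a larger buffer, e.g. to hand only a payload region to a checksum or
 * verifier routine. The 12-byte header and 64-byte window are hypothetical
 * values chosen for this example.
 */
static inline int xdr_subsegment_example(struct xdr_buf *buf,
					 struct xdr_buf *payload)
{
	if (xdr_buf_subsegment(buf, payload, 12, 64))
		return -EINVAL;	/* window runs past the end of buf */
	return 0;
}
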
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);

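/*
 * Illustrative sketch, not part of the original file: copying a fixed
 * 8-byte cookie out of an xdr_buf at a known offset. The helper works no
 * matter whether the cookie sits in the head, in the pages, or straddles
 * the two. The name and sizes are assumptions for this example.
 */
static inline int xdr_read_cookie_example(struct xdr_buf *buf,
					  unsigned int offset, u8 cookie[8])
{
	return read_bytes_from_xdr_buf(buf, offset, cookie, 8);
}
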
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32 raw;
	int status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32 raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
					 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);

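/*
 * Illustrative sketch, not part of the original file: a minimal actor for
 * xdr_process_buf() that only sums the lengths of the scatterlist segments
 * it is handed. Real callers (e.g. RPCSEC_GSS checksumming) would feed sg
 * into a hash instead. All names here are assumptions for this example.
 */
static inline int xdr_count_actor_example(struct scatterlist *sg, void *data)
{
	size_t *total = data;

	*total += sg->length;
	return 0;		/* a non-zero return would abort the walk */
}

static inline int xdr_count_bytes_example(struct xdr_buf *buf,
					  unsigned int offset,
					  unsigned int len, size_t *total)
{
	*total = 0;
	return xdr_process_buf(buf, offset, len, xdr_count_actor_example,
			       total);
}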