/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>

/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL(xdr_encode_netobj);

__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_netobj);

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC 1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
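
/*
 * Worked example (illustrative note, not part of the original file): for a
 * fixed-length opaque of nbytes = 5, XDR_QUADLEN(5) = 2, so quadlen << 2 = 8
 * and padding = 3; the five data bytes are followed by three zero bytes and
 * p advances by two 32-bit words.
 */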

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
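
/*
 * Usage sketch (illustrative only, not part of the original file; xdr, data
 * and len stand for caller-supplied values): reserve stream space for the
 * length word plus the quad-aligned data, then emit the opaque:
 *
 *	__be32 *p = xdr_reserve_space(xdr, 4 + XDR_QUADLEN(len) * 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;	/- no room left in the scratch buffer -/
 *	xdr_encode_opaque(p, data, len);
 */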

__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL(xdr_encode_string);

__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL(xdr_decode_string_inplace);

void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL(xdr_encode_pages);

void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL(xdr_inline_pages);

/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *       they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
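
/*
 * Addressing example (illustrative note, not part of the original file):
 * with 4096-byte pages, a page-vector address of 5000 refers to byte
 * 5000 & ~PAGE_CACHE_MASK = 904 in pages[5000 >> PAGE_CACHE_SHIFT] = pages[1],
 * i.e. (1 << PAGE_CACHE_SHIFT) + 904 = 5000.
 */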

/**
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}

/**
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}

/**
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
			       (char *)head->iov_base +
			       head->iov_len - offs,
			       copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

/**
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		} else
			buf->buflen -= len;
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}

void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL(xdr_shift_buf);

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
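
/*
 * Encoding usage sketch (illustrative only, not part of the original file;
 * req->rq_snd_buf, value and len stand for caller-supplied state): initialize
 * the stream over the send buffer, then reserve quad-aligned space before
 * writing each item:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *
 *	xdr_init_encode(&xdr, &req->rq_snd_buf, NULL);
 *	p = xdr_reserve_space(&xdr, 4 + XDR_QUADLEN(len) * 4);
 *	if (p == NULL)
 *		return -EMSGSIZE;	/- would overrun the scratch buffer -/
 *	xdr_encode_opaque(p, value, len);
 */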

/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (__be32 *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
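
/*
 * Decoding usage sketch (illustrative only, not part of the original file;
 * rcvbuf stands for a caller-supplied struct xdr_buf): initialize the stream
 * over the reply, then pull fixed-size items out of the head:
 *
 *	struct xdr_stream xdr;
 *	__be32 *p;
 *	u32 count;
 *
 *	xdr_init_decode(&xdr, rcvbuf, rcvbuf->head[0].iov_base);
 *	p = xdr_inline_decode(&xdr, 4);
 *	if (p == NULL)
 *		return -EIO;		/- reply too short -/
 *	count = ntohl(*p);
 */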

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);

/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char * kaddr = page_address(xdr->buf->pages[0]);
	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
	xdr->end = (__be32 *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);

static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL(xdr_buf_from_iov);

/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL(xdr_buf_subsegment);
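
/*
 * Worked example (illustrative note, not part of the original file): for a
 * buf with head[0].iov_len = 100 and page_len = 4096, the call
 *
 *	xdr_buf_subsegment(buf, &subbuf, 80, 200);
 *
 * yields a subbuf whose head covers the last 20 head bytes and whose page
 * area covers the first 180 bytes of the page data, starting at
 * buf->page_base.
 */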

static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL(read_bytes_from_xdr_buf);

static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}

/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}

int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
EXPORT_SYMBOL(xdr_decode_word);

int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL(xdr_encode_word);
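
/*
 * Usage sketch (illustrative only, not part of the original file): read the
 * 32-bit XDR word at byte offset 12 of a buffer, regardless of whether it
 * lives in the head, the page array, or the tail:
 *
 *	u32 count;
 *
 *	if (xdr_decode_word(buf, 12, &count))
 *		return -EIO;
 */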

/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL(xdr_buf_read_netobj);

/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}

int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL(xdr_decode_array2);

int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL(xdr_encode_array2);

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL(xdr_process_buf);
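
/*
 * Actor sketch (illustrative only, not part of the original file;
 * count_bytes_actor is a hypothetical callback): the actor receives one
 * scatterlist entry per contiguous region and can accumulate state through
 * the opaque data pointer, e.g. to total up the bytes visited:
 *
 *	static int count_bytes_actor(struct scatterlist *sg, void *data)
 *	{
 *		unsigned int *total = data;
 *
 *		*total += sg->length;
 *		return 0;		// non-zero would abort the walk
 *	}
 *
 *	unsigned int total = 0;
 *	int err = xdr_process_buf(buf, 0, buf->len, count_bytes_actor, &total);
 */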