/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
/*
 * XDR functions for basic NFS types
 */
u32 *
xdr_encode_netobj(u32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = htonl(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
u32 *
xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);
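#if 0
/* Illustrative sketch (editorial, not part of the original file): encoding
 * a 6-byte opaque blob consumes two quads; the helper zeroes the two
 * trailing pad bytes as RFC 1832 requires. The function name is made up. */
static u32 *example_encode_cookie(u32 *p, const u8 cookie[6])
{
	return xdr_encode_opaque_fixed(p, cookie, 6);
}
#endif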
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
u32 *
xdr_encode_string(u32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
u32 *
xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *	if a memory area starts at byte 'base' in page 'pages[i]',
 *	then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			flush_dcache_page(*pgto);
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
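/*
 * Worked example (editorial note, not from the original file): with 4K
 * pages (PAGE_CACHE_SHIFT == 12), a linear page-vector address of 5000
 * refers to byte 904 of pages[1], since 5000 >> 12 == 1 and
 * 5000 & ~PAGE_CACHE_MASK == 904. The copy loops here and below walk
 * such addresses one page segment at a time.
 */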
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;

	} while ((len -= copy) != 0);
	flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom, KM_USER0);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom, KM_USER0);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	char *p;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		p = (char *)tail->iov_base + len;
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove(p, tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > tail->iov_len)
			copy = tail->iov_len;
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (uint32_t *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
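#if 0
/* Illustrative sketch (editorial, not part of the original file): an
 * encoder reserves room first and only then writes through the returned
 * pointer; a NULL return means the scratch buffer is exhausted. The
 * function name is made up. */
static int example_encode_u64(struct xdr_stream *xdr, u64 val)
{
	uint32_t *p = xdr_reserve_space(xdr, 8);

	if (p == NULL)
		return -EMSGSIZE;
	*p++ = htonl((u32)(val >> 32));
	*p = htonl((u32)val);
	return 0;
}
#endif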
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct kvec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);
/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);
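#if 0
/* Illustrative sketch (editorial, not part of the original file): decoding
 * one 32-bit word; NULL means the request ran past xdr->end. The function
 * name is made up. */
static int example_decode_len(struct xdr_stream *xdr, u32 *lenp)
{
	uint32_t *p = xdr_inline_decode(xdr, 4);

	if (p == NULL)
		return -EIO;
	*lenp = ntohl(*p);
	return 0;
}
#endif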
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov = buf->head;
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
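/*
 * Usage note (editorial, not from the original file): a READ-style reply
 * decoder typically pulls the byte count out of head[0] with
 * xdr_inline_decode(), then calls xdr_read_pages(xdr, count) so that the
 * page data starts exactly at the current position and any trailing XDR
 * items can be decoded from the tail.
 */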
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	char *kaddr = page_address(xdr->buf->pages[0]);

	xdr_read_pages(xdr, len);
	/*
	 * Position current pointer at beginning of the first page,
	 * and clamp the decode window to that page.
	 */
	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
	xdr->p = (uint32_t *)(kaddr + xdr->buf->page_base);
	xdr->end = (uint32_t *)((char *)xdr->p + len);
}
EXPORT_SYMBOL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
/* Sets subiov to the intersection of iov with the buffer of length len
 * starting base bytes after iov. Indicates empty intersection by setting
 * length of subiov to zero. Decrements len by length of subiov, sets base
 * to zero (or decrements it by length of iov if subiov is empty). */
static void
iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
{
	if (*base > iov->iov_len) {
		subiov->iov_base = NULL;
		subiov->iov_len = 0;
		*base -= iov->iov_len;
	} else {
		subiov->iov_base = iov->iov_base + *base;
		subiov->iov_len = min(*len, (int)iov->iov_len - *base);
		*base = 0;
	}
	*len -= subiov->iov_len;
}
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			int base, int len)
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
		i = (base + buf->page_base) >> PAGE_CACHE_SHIFT;
		subbuf->pages = &buf->pages[i];
		subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK;
		subbuf->page_len = min((int)buf->page_len - base, len);
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	iov_subsegment(buf->tail, subbuf->tail, &base, &len);
	if (base || len)
		return -1;
	return 0;
}
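#if 0
/* Illustrative sketch (editorial, not part of the original file): carving
 * a 16-byte window starting 32 bytes into an existing xdr_buf. The
 * function name is made up. */
static int example_subsegment(struct xdr_buf *buf)
{
	struct xdr_buf sub;

	if (xdr_buf_subsegment(buf, &sub, 32, 16))
		return -EINVAL;
	/* sub.head, sub.pages and sub.tail now describe bytes 32..47 */
	return 0;
}
#endif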
/* obj is assumed to point to allocated memory of size at least len: */
static int
read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(obj, subbuf.head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(obj, subbuf.tail[0].iov_base, this_len);
out:
	return status;
}
/* obj is assumed to point to allocated memory of size at least len: */
static int
write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
{
	struct xdr_buf subbuf;
	int this_len;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status)
		goto out;
	this_len = min(len, (int)subbuf.head[0].iov_len);
	memcpy(subbuf.head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.page_len);
	if (this_len)
		_copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min(len, (int)subbuf.tail[0].iov_len);
	memcpy(subbuf.tail[0].iov_base, obj, this_len);
out:
	return status;
}
int
xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
{
	u32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = ntohl(raw);
	return 0;
}
int
xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
{
	u32	raw = htonl(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
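#if 0
/* Illustrative sketch (editorial, not part of the original file):
 * round-tripping one word at byte offset 8 of an xdr_buf. The function
 * name is made up. */
static int example_word_roundtrip(struct xdr_buf *buf)
{
	u32 val;

	if (xdr_encode_word(buf, 8, 42))
		return -EINVAL;
	if (xdr_decode_word(buf, 8, &val))
		return -EINVAL;
	return val == 42 ? 0 : -EIO;
}
#endif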
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set obj to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int
xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
{
	u32	tail_offset = buf->head[0].iov_len + buf->page_len;
	u32	obj_end_offset;

	if (xdr_decode_word(buf, offset, &obj->len))
		goto out;
	obj_end_offset = offset + 4 + obj->len;

	if (obj_end_offset <= buf->head[0].iov_len) {
		/* The obj is contained entirely in the head: */
		obj->data = buf->head[0].iov_base + offset + 4;
	} else if (offset + 4 >= tail_offset) {
		if (obj_end_offset - tail_offset
				> buf->tail[0].iov_len)
			goto out;
		/* The obj is contained entirely in the tail: */
		obj->data = buf->tail[0].iov_base
			+ offset - tail_offset + 4;
	} else {
		/* use end of tail as storage for obj:
		 * (We don't copy to the beginning because then we'd have
		 * to worry about doing a potentially overlapping copy.
		 * This assumes the object is at most half the length of the
		 * tail.) */
		if (obj->len > buf->tail[0].iov_len)
			goto out;
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
				obj->len;
		if (read_bytes_from_xdr_buf(buf, offset + 4,
					obj->data, obj->len))
			goto out;
	}
	return 0;
out:
	return -1;
}
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;	/* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			} else {
				while (avail_page >= desc->elem_size) {
					err = desc->xcode(desc, c);
					if (err)
						goto out;
					c += desc->elem_size;
					avail_page -= desc->elem_size;
				}
				if (avail_page) {
					unsigned int l = min(avail_page,
						desc->elem_size - copied);
					if (!elem) {
						elem = kmalloc(desc->elem_size,
							       GFP_KERNEL);
						err = -ENOMEM;
						if (!elem)
							goto out;
					}
					if (encode) {
						if (!copied) {
							err = desc->xcode(desc,
									elem);
							if (err)
								goto out;
						}
						memcpy(c, elem + copied, l);
						copied += l;
						if (copied == desc->elem_size)
							copied = 0;
					} else {
						memcpy(elem + copied, c, l);
						copied += l;
						if (copied == desc->elem_size) {
							err = desc->xcode(desc,
									elem);
							if (err)
								goto out;
							copied = 0;
						}
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;	/* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
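#if 0
/* Illustrative sketch (editorial, not part of the original file): decoding
 * an XDR array of 4-byte elements. The desc field and callback shapes
 * follow struct xdr_array2_desc as used above; everything else is made up.
 * The xcode callback sees each element as one contiguous buffer, even when
 * the element straddled a head/page/tail boundary on the wire. */
static int example_elem(struct xdr_array2_desc *desc, void *elem)
{
	u32 v = ntohl(*(u32 *)elem);

	return v ? 0 : -EINVAL;
}

static int example_decode(struct xdr_buf *buf, unsigned int base)
{
	struct xdr_array2_desc desc = {
		.elem_size = 4,
		.array_maxlen = 64,
		.xcode = example_elem,
	};

	return xdr_decode_array2(buf, base, &desc);
}
#endif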