/*
 * linux/net/sunrpc/xdr.c
 *
 * Generic XDR support.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
/*
 * XDR functions for basic NFS types
 */
__be32 *
xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
{
	unsigned int	quadlen = XDR_QUADLEN(obj->len);

	p[quadlen] = 0;		/* zero trailing bytes */
	*p++ = cpu_to_be32(obj->len);
	memcpy(p, obj->data, obj->len);
	return p + XDR_QUADLEN(obj->len);
}
EXPORT_SYMBOL_GPL(xdr_encode_netobj);
__be32 *
xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
{
	unsigned int	len;

	if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
		return NULL;
	obj->len  = len;
	obj->data = (u8 *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_netobj);
/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p: pointer to current position in XDR buffer.
 * @ptr: pointer to data to encode (or NULL)
 * @nbytes: size of data.
 *
 * Returns the updated current XDR buffer position
 */
__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = cpu_to_be32(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_encode_opaque);
__be32 *
xdr_encode_string(__be32 *p, const char *string)
{
	return xdr_encode_array(p, string, strlen(string));
}
EXPORT_SYMBOL_GPL(xdr_encode_string);
__be32 *
xdr_decode_string_inplace(__be32 *p, char **sp,
			  unsigned int *lenp, unsigned int maxlen)
{
	u32 len;

	len = be32_to_cpu(*p++);
	if (len > maxlen)
		return NULL;
	*lenp = len;
	*sp = (char *) p;
	return p + XDR_QUADLEN(len);
}
EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
		 struct page **pages, unsigned int base, unsigned int len)
{
	struct kvec *head = xdr->head;
	struct kvec *tail = xdr->tail;
	char *buf = (char *)head->iov_base;
	unsigned int buflen = head->iov_len;

	head->iov_len  = offset;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}
EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 */

/*
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *	they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		vfrom = kmap_atomic(*pgfrom);
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	flush_dcache_page(*pgto);
}
/*
 * _copy_from_pages
 * @p: pointer to destination
 * @pages: array of pages
 * @pgbase: offset of source data
 * @len: length
 *
 * Copies data into an arbitrary memory location from an array of pages
 * The copy is assumed to be non-overlapping.
 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	do {
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		if (len > pglen)
			len = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				len);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
/**
 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
 * @xdr: pointer to struct xdr_stream
 */
unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
{
	return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
}
EXPORT_SYMBOL_GPL(xdr_stream_pos);
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header kvec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the kvec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current kvec.
 */
__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p = xdr->p;
	__be32 *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL_GPL(xdr_reserve_space);
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	xdr->page_ptr = NULL;
}
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
}
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
/**
 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @pages: list of pages to decode into
 * @len: length in bytes of buffer in pages
 */
void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
			   struct page **pages, unsigned int len)
{
	memset(buf, 0, sizeof(*buf));
	buf->pages = pages;
	buf->page_len = len;
	buf->buflen = len;
	buf->len = len;
	xdr_init_decode(xdr, buf, NULL);
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	unsigned int nwords = XDR_QUADLEN(nbytes);
	__be32 *p = xdr->p;
	__be32 *q = p + nwords;

	if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->nwords -= nwords;
	return p;
}
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur)
		xdr_shrink_bufhead(buf, iov->iov_len - cur);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	xdr->nwords = XDR_QUADLEN(buf->len - cur);
	return len;
}
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	len = xdr_align_pages(xdr, len);
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	if (len != 0)
		xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(obj, subbuf->head[0].iov_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(obj, subbuf->tail[0].iov_base, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__read_bytes_from_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
{
	unsigned int this_len;

	this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
	memcpy(subbuf->head[0].iov_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->page_len);
	if (this_len)
		_copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
	len -= this_len;
	obj += this_len;
	this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
	memcpy(subbuf->tail[0].iov_base, obj, this_len);
}
/* obj is assumed to point to allocated memory of size at least len: */
int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
{
	struct xdr_buf subbuf;
	int status;

	status = xdr_buf_subsegment(buf, &subbuf, base, len);
	if (status != 0)
		return status;
	__write_bytes_to_xdr_buf(&subbuf, obj, len);
	return 0;
}
EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
int
xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
{
	__be32	raw;
	int	status;

	status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
	if (status)
		return status;
	*obj = be32_to_cpu(raw);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_decode_word);
int
xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
{
	__be32	raw = cpu_to_be32(obj);

	return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
}
EXPORT_SYMBOL_GPL(xdr_encode_word);
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
/* Returns 0 on success, or else a negative error code. */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	if (!desc->xcode)
		return 0;

	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			} else {
				while (avail_page >= desc->elem_size) {
					err = desc->xcode(desc, c);
					if (err)
						goto out;
					c += desc->elem_size;
					avail_page -= desc->elem_size;
				}
				if (avail_page) {
					unsigned int l = min(avail_page,
					    desc->elem_size - copied);
					if (!elem) {
						elem = kmalloc(desc->elem_size,
							       GFP_KERNEL);
						err = -ENOMEM;
						if (!elem)
							goto out;
					}
					if (encode) {
						if (!copied) {
							err = desc->xcode(desc, elem);
							if (err)
								goto out;
						}
						memcpy(c, elem + copied, l);
						copied += l;
						if (copied == desc->elem_size)
							copied = 0;
					} else {
						memcpy(elem + copied, c, l);
						copied += l;
						if (copied == desc->elem_size) {
							err = desc->xcode(desc, elem);
							if (err)
								goto out;
							copied = 0;
						}
					}
				}
			}
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	if (ppages)
		kunmap(*ppages);
	return err;
}
int
xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if (base >= buf->len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 0);
}
EXPORT_SYMBOL_GPL(xdr_decode_array2);
int
xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
		  struct xdr_array2_desc *desc)
{
	if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
	    buf->head->iov_len + buf->page_len + buf->tail->iov_len)
		return -EINVAL;

	return xdr_xcode_array2(buf, base, desc, 1);
}
EXPORT_SYMBOL_GPL(xdr_encode_array2);
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist      sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);