/*
 *      iovec manipulation routines.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *              Andrew Lunn     :       Errors in iovec copying.
 *              Pedro Roque     :       Added memcpy_fromiovecend and
 *                                      csum_..._fromiovecend.
 *              Andi Kleen      :       fixed error handling for 2.1
 *              Alexey Kuznetsov:       2.1 optimisations
 *              Andi Kleen      :       Fix csum*fromiovecend for IPv6.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>

/*
 *      Verify iovec. The caller must ensure that the iovec is big enough
 *      to hold the message iovec.
 *
 *      Save time not doing access_ok. copy_*_user will make this work
 *      in any case.
 */

int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
        int size, err, ct;

        if (m->msg_namelen) {
                if (mode == VERIFY_READ) {
                        err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
                                                  address);
                        if (err < 0)
                                return err;
                }
                m->msg_name = address;
        } else {
                m->msg_name = NULL;
        }

        size = m->msg_iovlen * sizeof(struct iovec);
        if (copy_from_user(iov, m->msg_iov, size))
                return -EFAULT;

        m->msg_iov = iov;
        err = 0;        /* err doubles as the total length of the iovec */

        for (ct = 0; ct < m->msg_iovlen; ct++) {
                err += iov[ct].iov_len;
                /*
                 * Goal is not to verify user data, but to prevent returning
                 * negative value, which is interpreted as errno.
                 * Overflow is still possible, but it is harmless.
                 */
                if (err < 0)
                        return -EMSGSIZE;
        }

        return err;
}
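
/*
 * Illustrative sketch, not part of the original file: roughly how a
 * sendmsg-style caller might use verify_iovec() with an on-stack iovec
 * array (UIO_FASTIOV comes from <linux/uio.h>).  The function name
 * example_send() and its error handling are assumptions for the example.
 */
#if 0   /* illustration only */
static int example_send(struct msghdr *msg)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct sockaddr_storage address;
        int total_len;

        /* The caller must ensure the destination array is large enough. */
        if (msg->msg_iovlen > UIO_FASTIOV)
                return -EMSGSIZE;       /* a real caller would allocate a bigger array */

        /* Copies msg->msg_iov into iovstack and returns the summed length. */
        total_len = verify_iovec(msg, iovstack, (struct sockaddr *)&address,
                                 VERIFY_READ);
        if (total_len < 0)
                return total_len;

        /* ... hand iovstack and total_len to the protocol's sendmsg ... */
        return total_len;
}
#endif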

/*
 *      Copy kernel to iovec. Returns -EFAULT on error.
 *
 *      Note: this modifies the original iovec.
 */

int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
        while (len > 0) {
                if (iov->iov_len) {
                        int copy = min_t(unsigned int, iov->iov_len, len);
                        if (copy_to_user(iov->iov_base, kdata, copy))
                                return -EFAULT;
                        kdata += copy;
                        len -= copy;
                        iov->iov_len -= copy;
                        iov->iov_base += copy;
                }
                iov++;
        }

        return 0;
}
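
/*
 * Illustrative sketch, not part of the original file: copying a linear
 * kernel buffer out to a user iovec, as a recvmsg-style path might.
 * Note that memcpy_toiovec() consumes the iovec as it copies, so the
 * caller must not expect to reuse it afterwards.
 */
#if 0   /* illustration only */
static int example_copy_to_user_iov(struct iovec *iov, unsigned char *kbuf,
                                    int len)
{
        int err = memcpy_toiovec(iov, kbuf, len);

        /* On -EFAULT the iovec may already be partially consumed. */
        return err ? err : len;
}
#endif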

/*
 *      Copy iovec to kernel. Returns -EFAULT on error.
 *
 *      Note: this modifies the original iovec.
 */

int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
{
        while (len > 0) {
                if (iov->iov_len) {
                        int copy = min_t(unsigned int, len, iov->iov_len);
                        if (copy_from_user(kdata, iov->iov_base, copy))
                                return -EFAULT;
                        len -= copy;
                        kdata += copy;
                        iov->iov_base += copy;
                        iov->iov_len -= copy;
                }
                iov++;
        }

        return 0;
}
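
/*
 * Illustrative sketch, not part of the original file: gathering the user
 * data described by an iovec into one linear kernel buffer, e.g. when
 * building a single datagram.  Like memcpy_toiovec(), this consumes the
 * iovec, so callers that need it again must keep a copy.
 */
#if 0   /* illustration only */
static int example_gather(unsigned char *kbuf, struct iovec *iov, int len)
{
        if (memcpy_fromiovec(kbuf, iov, len))
                return -EFAULT;
        /* kbuf now holds the first len bytes described by the iovec. */
        return len;
}
#endif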

/*
 *      For use with ip_build_xmit
 */
int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
                        int len)
{
        /* Skip over the finished iovecs */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
        }

        while (len > 0) {
                u8 __user *base = iov->iov_base + offset;
                int copy = min_t(unsigned int, len, iov->iov_len - offset);

                offset = 0;
                if (copy_from_user(kdata, base, copy))
                        return -EFAULT;
                len -= copy;
                kdata += copy;
                iov++;
        }

        return 0;
}
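
/*
 * Illustrative sketch, not part of the original file: because the ...end
 * variant takes an explicit offset and leaves the iovec untouched, a
 * fragmenting sender can walk the same iovec once per fragment.  The
 * function name and parameters below are assumptions for the example.
 */
#if 0   /* illustration only */
static int example_copy_fragment(unsigned char *frag, struct iovec *iov,
                                 int offset, int frag_len)
{
        /* The iovec is not modified, so this may be called repeatedly. */
        return memcpy_fromiovecend(frag, iov, offset, frag_len);
}
#endif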

/*
 *      And now for the all-in-one: copy and checksum from a user iovec
 *      directly to a datagram
 *      Calls to csum_partial but the last must be in 32 bit chunks
 *
 *      ip_build_xmit must ensure that when fragmenting only the last
 *      call to this function will be unaligned also.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
                                   int offset, unsigned int len, __wsum *csump)
{
        __wsum csum = *csump;
        int partial_cnt = 0, err = 0;   /* partial_cnt: bytes copied but not yet checksummed (< 4) */

        /* Skip over the finished iovecs */
        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                iov++;
        }

        while (len > 0) {
                u8 __user *base = iov->iov_base + offset;
                int copy = min_t(unsigned int, len, iov->iov_len - offset);

                offset = 0;

                /* There is a remnant from previous iov. */
                if (partial_cnt) {
                        int par_len = 4 - partial_cnt;

                        /* iov component is too short ... */
                        if (par_len > copy) {
                                if (copy_from_user(kdata, base, copy))
                                        goto out_fault;
                                kdata += copy;
                                base += copy;
                                partial_cnt += copy;
                                len -= copy;
                                iov++;
                                if (len)
                                        continue;
                                *csump = csum_partial(kdata - partial_cnt,
                                                      partial_cnt, csum);
                                goto out;
                        }
                        if (copy_from_user(kdata, base, par_len))
                                goto out_fault;
                        csum = csum_partial(kdata - partial_cnt, 4, csum);
                        kdata += par_len;
                        base  += par_len;
                        copy  -= par_len;
                        len   -= par_len;
                        partial_cnt = 0;
                }

                if (len > copy) {
                        partial_cnt = copy % 4;
                        if (partial_cnt) {
                                copy -= partial_cnt;
                                if (copy_from_user(kdata + copy, base + copy,
                                                   partial_cnt))
                                        goto out_fault;
                        }
                }

                if (copy) {
                        csum = csum_and_copy_from_user(base, kdata, copy,
                                                       csum, &err);
                        if (err)
                                goto out;
                }
                len   -= copy + partial_cnt;
                kdata += copy + partial_cnt;
                iov++;
        }
        *csump = csum;
out:
        return err;

out_fault:
        err = -EFAULT;
        goto out;
}
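
/*
 * Illustrative sketch, not part of the original file: seeding the
 * checksum accumulator with an already-built header and letting
 * csum_partial_copy_fromiovecend() extend it over the copied payload,
 * as a UDP-style sender might.  Names are assumptions for the example.
 */
#if 0   /* illustration only */
static int example_copy_and_csum(unsigned char *hdr, int hdrlen,
                                 unsigned char *payload, struct iovec *iov,
                                 int offset, unsigned int len, __wsum *sump)
{
        __wsum csum = csum_partial(hdr, hdrlen, 0);
        int err;

        err = csum_partial_copy_fromiovecend(payload, iov, offset, len, &csum);
        if (err)
                return err;

        *sump = csum;   /* now covers hdr plus the copied payload */
        return 0;
}
#endif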

EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
EXPORT_SYMBOL(memcpy_fromiovec);
EXPORT_SYMBOL(memcpy_fromiovecend);
EXPORT_SYMBOL(memcpy_toiovec);