/*
 *	iovec manipulation routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
/*
 *	Verify iovec. The caller must ensure that the iovec is big enough
 *	to hold the message iovec.
 *
 *	Save time not doing access_ok. copy_*_user will make this work
 *	in any case.
 */
int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode)
{
	int size, err, ct;

	if (m->msg_namelen) {
		if (mode == VERIFY_READ) {
			err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
						  address);
			if (err < 0)
				return err;
		}
		m->msg_name = address;
	} else {
		m->msg_name = NULL;
	}

	size = m->msg_iovlen * sizeof(struct iovec);
	if (copy_from_user(iov, m->msg_iov, size))
		return -EFAULT;

	m->msg_iov = iov;
	err = 0;

	for (ct = 0; ct < m->msg_iovlen; ct++) {
		err += iov[ct].iov_len;
		/*
		 * Goal is not to verify user data, but to prevent returning
		 * negative value, which is interpreted as errno.
		 * Overflow is still possible, but it is harmless.
		 */
		if (err < 0)
			return -EMSGSIZE;
	}

	return err;
}
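
/*
 * Illustrative only (not part of the original file): a hypothetical
 * sendmsg-style caller would use the summed length returned above as the
 * payload size of the datagram, roughly:
 *
 *	struct iovec iovstack[UIO_FASTIOV];
 *	char address[MAX_SOCK_ADDR];
 *	int total;
 *
 *	total = verify_iovec(msg, iovstack, address, VERIFY_READ);
 *	if (total < 0)
 *		return total;		// -EFAULT, -EMSGSIZE, ...
 *	// 'total' is the number of payload bytes described by the iovec,
 *	// and msg->msg_iov now points at the kernel copy in iovstack.
 *
 * UIO_FASTIOV and MAX_SOCK_ADDR are assumed here for illustration; the
 * real callers live in net/socket.c.
 */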
/*
 *	Copy kernel to iovec. Returns -EFAULT on error.
 *
 *	Note: this modifies the original iovec.
 */
int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
	while (len > 0) {
		if (iov->iov_len) {
			int copy = min_t(unsigned int, iov->iov_len, len);
			if (copy_to_user(iov->iov_base, kdata, copy))
				return -EFAULT;
			kdata += copy;
			len -= copy;
			iov->iov_len -= copy;
			iov->iov_base += copy;
		}
		iov++;
	}

	return 0;
}
/*
 *	Copy iovec to kernel. Returns -EFAULT on error.
 *
 *	Note: this modifies the original iovec.
 */
int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
{
	while (len > 0) {
		if (iov->iov_len) {
			int copy = min_t(unsigned int, len, iov->iov_len);
			if (copy_from_user(kdata, iov->iov_base, copy))
				return -EFAULT;
			len -= copy;
			kdata += copy;
			iov->iov_base += copy;
			iov->iov_len -= copy;
		}
		iov++;
	}

	return 0;
}
/*
 *	For use with ip_build_xmit
 */
int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
			int len)
{
	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;
		if (copy_from_user(kdata, base, copy))
			return -EFAULT;
		len -= copy;
		kdata += copy;
		iov++;
	}

	return 0;
}
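
/*
 * Illustrative sketch (not part of the original file): unlike
 * memcpy_fromiovec() above, this variant does not modify the iovec, so a
 * caller can copy arbitrary windows of the user data, e.g. one fragment
 * at a time:
 *
 *	int off, chunk;
 *
 *	for (off = 0; off < total; off += chunk) {
 *		chunk = min_t(unsigned int, frag_len, total - off);
 *
 *		if (memcpy_fromiovecend(frag_buf, iov, off, chunk))
 *			return -EFAULT;
 *		// ... emit frag_buf as one fragment ...
 *	}
 *
 * off, total, frag_len and frag_buf are illustrative names only.
 */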
/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram.
 *	All calls to csum_partial except the last must be made in 32-bit
 *	chunks.
 *
 *	ip_build_xmit must ensure that when fragmenting, only the last
 *	call to this function will be unaligned.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, int *csump)
{
	int csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		if (len > copy) {
			/*
			 * Split off the tail that is not a multiple of 4:
			 * it is copied now but checksummed together with
			 * the start of the next iovec.
			 */
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
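
/*
 * Illustrative sketch (not part of the original file): the typical user of
 * this routine is a "getfrag"-style callback that fills one fragment of an
 * outgoing datagram while accumulating the payload checksum, roughly:
 *
 *	static int my_getfrag(void *from, char *to, int offset,
 *			      int len, int odd, struct sk_buff *skb)
 *	{
 *		struct iovec *iov = from;	// user iovec with the payload
 *		int csum = 0;
 *
 *		if (csum_partial_copy_fromiovecend(to, iov, offset, len,
 *						   &csum) < 0)
 *			return -EFAULT;
 *		skb->csum = csum_block_add(skb->csum, csum, odd);
 *		return 0;
 *	}
 *
 * my_getfrag is a made-up name; the real equivalents live in the IPv4/IPv6
 * output paths (e.g. ip_generic_getfrag).
 */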
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
EXPORT_SYMBOL(memcpy_fromiovec);
EXPORT_SYMBOL(memcpy_fromiovecend);
EXPORT_SYMBOL(memcpy_toiovec);