/*
 *	net/core/iovec.c
 *
 *	iovec manipulation routines.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *		Andrew Lunn	:	Errors in iovec copying.
 *		Pedro Roque	:	Added memcpy_fromiovecend and
 *					csum_..._fromiovecend.
 *		Andi Kleen	:	fixed error handling for 2.1
 *		Alexey Kuznetsov:	2.1 optimisations
 *		Andi Kleen	:	Fix csum*fromiovecend for IPv6.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <net/checksum.h>
#include <net/sock.h>

/*
 *	Verify iovec. The caller must ensure that the iovec is big enough
 *	to hold the message iovec.
 *
 *	Save time not doing access_ok. copy_*_user will make this work
 *	in any case.
 */

int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
	int size, ct, err;

	if (m->msg_namelen) {
		if (mode == VERIFY_READ) {
			void __user *namep;
			namep = (void __user __force *) m->msg_name;
			err = move_addr_to_kernel(namep, m->msg_namelen,
						  address);
			if (err < 0)
				return err;
		}
		m->msg_name = address;
	} else {
		m->msg_name = NULL;
	}

	size = m->msg_iovlen * sizeof(struct iovec);
	if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
		return -EFAULT;

	m->msg_iov = iov;
	err = 0;

	for (ct = 0; ct < m->msg_iovlen; ct++) {
		size_t len = iov[ct].iov_len;

		/* Clamp so the running total cannot overflow the int return value. */
		if (len > INT_MAX - err) {
			len = INT_MAX - err;
			iov[ct].iov_len = len;
		}
		err += len;
	}

	return err;
}
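
/*
 * Usage sketch, illustrative only (the helper name and error handling are
 * hypothetical; the real callers are the sendmsg()/recvmsg() paths in
 * net/socket.c): the caller supplies a kernel iovec array of at least
 * msg_iovlen entries, and verify_iovec() fills it from user space, rewrites
 * msg->msg_iov to point at the kernel copy, and returns the clamped total
 * byte count (negative on error).
 */
#if 0	/* example only, not compiled */
static int example_copy_msghdr_iov(struct msghdr *msg, struct iovec *iov,
				   struct sockaddr_storage *addr)
{
	/* The caller guarantees 'iov' has room for msg_iovlen entries. */
	if (msg->msg_iovlen > UIO_MAXIOV)
		return -EMSGSIZE;

	/* VERIFY_READ: data flows from user space, so the name is copied in too. */
	return verify_iovec(msg, iov, (struct sockaddr *)addr, VERIFY_READ);
}
#endif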

/*
 *	Copy kernel to iovec. Returns -EFAULT on error.
 *
 *	Note: this modifies the original iovec.
 */

int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
	while (len > 0) {
		if (iov->iov_len) {
			int copy = min_t(unsigned int, iov->iov_len, len);
			if (copy_to_user(iov->iov_base, kdata, copy))
				return -EFAULT;
			kdata += copy;
			len -= copy;
			iov->iov_len -= copy;
			iov->iov_base += copy;
		}
		iov++;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_toiovec);
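
/*
 * Usage sketch, illustrative only (the helper and its caller are
 * hypothetical): delivering a small, already prepared kernel buffer to the
 * user's iovec in a recvmsg()-style path. Because memcpy_toiovec() advances
 * iov_base/iov_len as it copies, successive calls fill the iovec
 * incrementally, but the iovec cannot be reused afterwards.
 */
#if 0	/* example only, not compiled */
static int example_put_reply(struct msghdr *msg, const unsigned char *buf,
			     int len)
{
	/* memcpy_toiovec() takes a non-const pointer; buf is only read. */
	return memcpy_toiovec(msg->msg_iov, (unsigned char *)buf, len);
}
#endif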

/*
 *	Copy kernel to iovec, starting at a byte offset into the iovec.
 *	Returns -EFAULT on error.
 */

int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
		      int offset, int len)
{
	int copy;

	for (; len > 0; ++iov) {
		/* Skip over the finished iovecs */
		if (unlikely(offset >= iov->iov_len)) {
			offset -= iov->iov_len;
			continue;
		}
		copy = min_t(unsigned int, iov->iov_len - offset, len);
		if (copy_to_user(iov->iov_base + offset, kdata, copy))
			return -EFAULT;
		offset = 0;
		kdata += copy;
		len -= copy;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_toiovecend);
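
/*
 * Usage sketch, illustrative only: unlike memcpy_toiovec(), the ...end
 * variant takes a byte offset and leaves the iovec untouched, so a header
 * and a payload can be written independently at fixed positions.
 */
#if 0	/* example only, not compiled */
static int example_put_hdr_and_payload(struct msghdr *msg,
				       unsigned char *hdr, int hdr_len,
				       unsigned char *payload, int pay_len)
{
	int err;

	err = memcpy_toiovecend(msg->msg_iov, hdr, 0, hdr_len);
	if (err)
		return err;
	/* The payload lands right after the header, wherever that falls. */
	return memcpy_toiovecend(msg->msg_iov, payload, hdr_len, pay_len);
}
#endif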

/*
 *	Copy iovec to kernel. Returns -EFAULT on error.
 *
 *	Note: this modifies the original iovec.
 */

int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
{
	while (len > 0) {
		if (iov->iov_len) {
			int copy = min_t(unsigned int, len, iov->iov_len);
			if (copy_from_user(kdata, iov->iov_base, copy))
				return -EFAULT;
			len -= copy;
			kdata += copy;
			iov->iov_base += copy;
			iov->iov_len -= copy;
		}
		iov++;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_fromiovec);
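
/*
 * Usage sketch, illustrative only (struct example_hdr and the helper are
 * hypothetical): pulling a fixed-size header off the front of the user's
 * data in a sendmsg()-style path. The iovec is advanced as a side effect,
 * so a later copy of the payload picks up exactly where the header ended.
 */
#if 0	/* example only, not compiled */
struct example_hdr {
	__u8	type;
	__u8	flags;
	__be16	len;
};

static int example_pull_hdr(struct msghdr *msg, struct example_hdr *hdr)
{
	return memcpy_fromiovec((unsigned char *)hdr, msg->msg_iov,
				sizeof(*hdr));
}
#endif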

/*
 *	Copy iovec to kernel, starting at a byte offset into the iovec.
 *	Returns -EFAULT on error.
 */

int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
			int offset, int len)
{
	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;
		if (copy_from_user(kdata, base, copy))
			return -EFAULT;
		len -= copy;
		kdata += copy;
		iov++;
	}

	return 0;
}
EXPORT_SYMBOL(memcpy_fromiovecend);
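
/*
 * Usage sketch, illustrative only: because memcpy_fromiovecend() does not
 * modify the iovec, it suits fragmenting or retransmit paths that need to
 * copy an arbitrary window of the user's data, possibly more than once.
 */
#if 0	/* example only, not compiled */
static int example_copy_fragment(unsigned char *frag, const struct iovec *iov,
				 int frag_offset, int frag_len)
{
	/* Copy frag_len bytes starting frag_offset bytes into the user data. */
	return memcpy_fromiovecend(frag, iov, frag_offset, frag_len);
}
#endif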

/*
 *	And now for the all-in-one: copy and checksum from a user iovec
 *	directly to a datagram.
 *	All calls to csum_partial but the last must be in 32 bit chunks.
 *
 *	ip_build_xmit must ensure that when fragmenting only the last
 *	call to this function will be unaligned also.
 */
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
				   int offset, unsigned int len, __wsum *csump)
{
	__wsum csum = *csump;
	int partial_cnt = 0, err = 0;

	/* Skip over the finished iovecs */
	while (offset >= iov->iov_len) {
		offset -= iov->iov_len;
		iov++;
	}

	while (len > 0) {
		u8 __user *base = iov->iov_base + offset;
		int copy = min_t(unsigned int, len, iov->iov_len - offset);

		offset = 0;

		/* There is a remnant from previous iov. */
		if (partial_cnt) {
			int par_len = 4 - partial_cnt;

			/* iov component is too short ... */
			if (par_len > copy) {
				if (copy_from_user(kdata, base, copy))
					goto out_fault;
				kdata += copy;
				base += copy;
				partial_cnt += copy;
				len -= copy;
				iov++;
				if (len)
					continue;
				*csump = csum_partial(kdata - partial_cnt,
						      partial_cnt, csum);
				goto out;
			}
			if (copy_from_user(kdata, base, par_len))
				goto out_fault;
			csum = csum_partial(kdata - partial_cnt, 4, csum);
			kdata += par_len;
			base += par_len;
			copy -= par_len;
			len -= par_len;
			partial_cnt = 0;
		}

		if (len > copy) {
			/* Hold back the trailing bytes that would break 4-byte alignment. */
			partial_cnt = copy % 4;
			if (partial_cnt) {
				copy -= partial_cnt;
				if (copy_from_user(kdata + copy, base + copy,
						   partial_cnt))
					goto out_fault;
			}
		}

		if (copy) {
			csum = csum_and_copy_from_user(base, kdata, copy,
						       csum, &err);
			if (err)
				goto out;
		}
		len -= copy + partial_cnt;
		kdata += copy + partial_cnt;
		iov++;
	}
	*csump = csum;
out:
	return err;

out_fault:
	err = -EFAULT;
	goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
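
/*
 * Usage sketch, illustrative only, modelled loosely on the kind of getfrag()
 * callback used by the IPv4 output path: copy one fragment's worth of user
 * data into the packet buffer and fold its checksum into skb->csum. Note
 * that *csump is an in/out parameter, so the caller must seed it (here with
 * 0) before the call.
 */
#if 0	/* example only, not compiled */
static int example_getfrag(void *from, unsigned char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;
	__wsum csum = 0;

	if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
		return -EFAULT;
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
#endif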