/* lib/iov_iter.c */

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/checksum.h>
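
/*
 * The iterate_* helpers below walk one segment type each.  STEP is an
 * expression evaluated for every non-empty chunk, with __v holding the
 * current base and length.  For user iovecs, STEP yields the number of
 * bytes it could NOT process (e.g. a short copy), which stops the walk.
 */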
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
	struct bvec_iter __start; \
	__start.bi_size = n; \
	__start.bi_bvec_done = skip; \
	__start.bi_idx = 0; \
	for_each_bvec(__v, i->bvec, __bi, __start) { \
		if (!__v.bv_len) \
			continue; \
		(void)(STEP); \
	} \
}
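
/*
 * iterate_all_kinds() dispatches on the iterator type and evaluates the
 * matching step expression: I for user iovecs, B for bio_vecs, K for kvecs.
 * It does not advance the iterator.
 */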
#define iterate_all_kinds(i, n, v, I, B, K) { \
	size_t skip = i->iov_offset; \
	if (unlikely(i->type & ITER_BVEC)) { \
		struct bio_vec v; \
		struct bvec_iter __bi; \
		iterate_bvec(i, n, v, __bi, skip, (B)) \
	} else if (unlikely(i->type & ITER_KVEC)) { \
		const struct kvec *kvec; \
		struct kvec v; \
		iterate_kvec(i, n, v, kvec, skip, (K)) \
	} else { \
		const struct iovec *iov; \
		struct iovec v; \
		iterate_iovec(i, n, v, iov, skip, (I)) \
	} \
}
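
/*
 * Same walk as iterate_all_kinds(), but the iterator is advanced past the
 * processed bytes: the segment pointer, nr_segs, iov_offset and count are
 * all updated.
 */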
#define iterate_and_advance(i, n, v, I, B, K) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (i->count) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			const struct bio_vec *bvec = i->bvec; \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec; \
			skip = __bi.bi_bvec_done; \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
			if (skip == kvec->iov_len) { \
				kvec++; \
				skip = 0; \
			} \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
			if (skip == iov->iov_len) { \
				iov++; \
				skip = 0; \
			} \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} \
		i->count -= n; \
		i->iov_offset = skip; \
	} \
}
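
/*
 * Copy from a page into a user-backed (iovec) iterator.  On CONFIG_HIGHMEM
 * kernels it pre-faults the destination and copies under kmap_atomic();
 * otherwise, or if the atomic copy cannot finish, it falls back to a
 * sleeping kmap().  Returns the number of bytes copied.
 */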
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
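
/* Counterpart of copy_page_to_iter_iovec(): copy user data into a page. */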
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_multipages_readable(v.iov_base,
					v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
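
/*
 * Initialise an iterator over a user-supplied iovec array.  If the caller
 * runs under set_fs(KERNEL_DS), the "iovecs" are really kernel pointers and
 * the iterator is set up as ITER_KVEC instead.
 */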
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}
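
/*
 * Copy @bytes from the kernel buffer at @addr into the iterator, advancing
 * it.  The bvec and kvec cases copy directly under kmap_atomic()/memcpy();
 * the iovec case goes through __copy_to_user().
 */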
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);
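
/*
 * For kernel-backed iterators (kvec/bvec) the page can simply be kmapped
 * and handed to copy_to_iter()/copy_from_iter(); user-backed iterators go
 * through the iovec-specific helpers above.
 */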
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_to_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);
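
/* Zero the next @bytes of the iterator and advance it. */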
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
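
/*
 * Copy from the iterator into a page under kmap_atomic(), using only the
 * atomic (non-faulting) user-copy primitive.  The iterator itself is not
 * advanced.
 */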
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);
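
/*
 * OR together the addresses and lengths of all remaining segments, so the
 * caller can test the whole iterator against an alignment mask in one go.
 */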
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	if (!size)
		return 0;

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
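
/*
 * Pin the pages backing the first segment of the iterator, up to @maxpages.
 * User iovecs go through get_user_pages_fast(), bvec pages just get an
 * extra reference, and kvecs are rejected with -EFAULT.  Returns the number
 * of bytes covered; *start is the offset into the first page.
 */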
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (!maxsize)
		return 0;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
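
/*
 * Like copy_from_iter(), but also folds an Internet checksum of the copied
 * data into *csum.
 */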
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
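
/* Return how many pages the next i->count bytes span, capped at @maxpages. */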
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);
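
/*
 * Duplicate an iterator, including a copy of its segment array.  Returns the
 * new array, or NULL if the allocation failed.
 */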
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);
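
/*
 * Validate a user-space iovec array and build an iov_iter over it.  On
 * success *iov points at a kmalloc'ed copy the caller must kfree(), or is
 * set to NULL if the caller-supplied fast array was used instead.
 */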
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif
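
/* Build a single-segment iterator over a user buffer, clamped to MAX_RW_COUNT. */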
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);