#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */
#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                __v.iov_len -= left; \
                skip += __v.iov_len; \
        while (unlikely(!left && n)) { \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                __v.iov_base = __p->iov_base; \
                __v.iov_len -= left; \
#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
        __v.iov_len = min(n, __p->iov_len - skip); \
        if (likely(__v.iov_len)) { \
                __v.iov_base = __p->iov_base + skip; \
                skip += __v.iov_len; \
        while (unlikely(n)) { \
                __v.iov_len = min(n, __p->iov_len); \
                if (unlikely(!__v.iov_len)) \
                __v.iov_base = __p->iov_base; \
#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
        struct bvec_iter __start; \
        __start.bi_size = n; \
        __start.bi_bvec_done = skip; \
        for_each_bvec(__v, i->bvec, __bi, __start) { \
#define iterate_all_kinds(i, n, v, I, B, K) { \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                struct bvec_iter __bi; \
                iterate_bvec(i, n, v, __bi, skip, (B)) \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
        } else { \
                const struct iovec *iov; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
#define iterate_and_advance(i, n, v, I, B, K) { \
        if (unlikely(i->count < n)) \
        size_t skip = i->iov_offset; \
        if (unlikely(i->type & ITER_BVEC)) { \
                const struct bio_vec *bvec = i->bvec; \
                struct bvec_iter __bi; \
                iterate_bvec(i, n, v, __bi, skip, (B)) \
                i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
                i->nr_segs -= i->bvec - bvec; \
                skip = __bi.bi_bvec_done; \
        } else if (unlikely(i->type & ITER_KVEC)) { \
                const struct kvec *kvec; \
                iterate_kvec(i, n, v, kvec, skip, (K)) \
                if (skip == kvec->iov_len) { \
                i->nr_segs -= kvec - i->kvec; \
        } else { \
                const struct iovec *iov; \
                iterate_iovec(i, n, v, iov, skip, (I)) \
                if (skip == iov->iov_len) { \
                i->nr_segs -= iov - i->iov; \
        i->iov_offset = skip; \
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
        size_t skip, copy, left, wanted;
        const struct iovec *iov;

        if (unlikely(bytes > i->count))
        if (unlikely(!bytes))
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);

                while (unlikely(!left && bytes)) {
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                offset = from - kaddr;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        /* Too bad - revert to non-atomic kmap */
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        while (unlikely(!left && bytes)) {
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
        if (skip == iov->iov_len) {
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov_offset = skip;
        return wanted - bytes;
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
        size_t skip, copy, left, wanted;
        const struct iovec *iov;

        if (unlikely(bytes > i->count))
        if (unlikely(!bytes))
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);

                while (unlikely(!left && bytes)) {
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);

                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        /* Too bad - revert to non-atomic kmap */
        left = __copy_from_user(to, buf, copy);
        while (unlikely(!left && bytes)) {
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
        if (skip == iov->iov_len) {
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov_offset = skip;
        return wanted - bytes;
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
        struct pipe_inode_info *pipe = i->pipe;
        int next = pipe->curbuf + pipe->nrbufs;
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
#else
#define sanity(i) true
#endif
static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}
static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;

        if (unlikely(bytes > i->count))
        if (unlikely(!bytes))
        buf = &pipe->bufs[idx];
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        i->iov_offset += bytes;
                idx = next_idx(idx, pipe);
                buf = &pipe->bufs[idx];
        if (idx == pipe->curbuf && pipe->nrbufs)
        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        i->iov_offset = offset + bytes;
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
        size_t skip = i->iov_offset;
        const struct iovec *iov;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
EXPORT_SYMBOL(iov_iter_fault_in_readable);
void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
        /* It will get better. Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->kvec = (struct kvec *)iov;
        i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_init);
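
/*
 * A minimal usage sketch (not part of the original file; the helper name is
 * made up): wrap a single user-space iovec in an iov_iter and copy a kernel
 * buffer into it.  The READ direction marks the iovec as a destination,
 * mirroring how the generic read(2) path sets up its iterator.
 */
static __maybe_unused size_t example_copy_to_user_iovec(void __user *ubuf,
                                                        const void *kbuf,
                                                        size_t len)
{
        struct iovec iov = { .iov_base = ubuf, .iov_len = len };
        struct iov_iter iter;

        iov_iter_init(&iter, READ, &iov, 1, len);
        /* returns the number of bytes actually copied */
        return copy_to_iter(kbuf, len, &iter);
}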
static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}
static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
        size_t off = i->iov_offset;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
        struct pipe_inode_info *pipe = i->pipe;

        if (unlikely(size > i->count))
        data_start(i, &idx, &off);
                left -= PAGE_SIZE - off;
                        pipe->bufs[idx].len += size;
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                        struct iov_iter *i)
        struct pipe_inode_info *pipe = i->pipe;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->iov_offset = off + chunk;
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
        const char *from = addr;
        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter(addr, bytes, i);
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
EXPORT_SYMBOL(copy_to_iter);
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(copy_from_iter);
bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        if (unlikely(i->count < bytes))
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user((to += v.iov_len) - v.iov_len,
                                     v.iov_base, v.iov_len))
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        iov_iter_advance(i, bytes);
EXPORT_SYMBOL(copy_from_iter_full);
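
/*
 * A minimal sketch of the all-or-nothing contract (the struct and helper
 * are hypothetical, not part of this file): callers that need a complete
 * fixed-size object use copy_from_iter_full() and treat a false return as
 * a fault, so they never have to cope with a short copy.
 */
struct example_hdr {
        __le32 magic;
        __le32 len;
};

static __maybe_unused int example_read_hdr(struct example_hdr *hdr,
                                           struct iov_iter *from)
{
        if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
                return -EFAULT; /* some of the header was unreadable */
        return 0;
}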
size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
EXPORT_SYMBOL(copy_from_iter_nocache);
bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        if (unlikely(i->count < bytes))
        iterate_all_kinds(i, bytes, v, ({
                if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                             v.iov_base, v.iov_len))
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        iov_iter_advance(i, bytes);
EXPORT_SYMBOL(copy_from_iter_full_nocache);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
        } else if (likely(!(i->type & ITER_PIPE)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
                return copy_page_to_iter_pipe(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_to_iter);
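
/*
 * Sketch of a typical read-side caller (a hypothetical helper, not part of
 * this file): hand one page-cache page to whatever destination the iterator
 * describes; a short copy normally means the destination faulted.
 */
static __maybe_unused ssize_t example_send_page(struct page *page,
                                                size_t offset, size_t len,
                                                struct iov_iter *to)
{
        size_t copied = copy_page_to_iter(page, offset, len, to);

        return copied ? copied : -EFAULT;
}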
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                        struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return copy_page_from_iter_iovec(page, offset, bytes, i);
EXPORT_SYMBOL(copy_page_from_iter);
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
        struct pipe_inode_info *pipe = i->pipe;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->iov_offset = off + chunk;
size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
EXPORT_SYMBOL(iov_iter_zero);
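
/*
 * Sketch (hypothetical helper): filesystems satisfy reads from file holes
 * with iov_iter_zero(), clamped to whatever is left in the iterator.
 */
static __maybe_unused size_t example_zero_tail(struct iov_iter *to, size_t want)
{
        return iov_iter_zero(min(want, iov_iter_count(to)), to);
}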
size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(i->type & ITER_PIPE)) {
                kunmap_atomic(kaddr);
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        kunmap_atomic(kaddr);
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
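
/*
 * A minimal sketch of the pattern these two helpers exist for, modelled on
 * the generic write path (the page locking and ->write_begin/->write_end
 * steps are elided; the helper name is made up):
 */
static __maybe_unused size_t example_fill_page(struct page *page,
                                               unsigned long offset,
                                               size_t bytes,
                                               struct iov_iter *from)
{
        size_t copied;

        /* fault the source in now - we must not take a page fault while atomic */
        if (iov_iter_fault_in_readable(from, bytes))
                return 0;

        copied = iov_iter_copy_from_user_atomic(page, from, offset, bytes);
        /* only advance by what actually made it into the page */
        iov_iter_advance(from, copied);
        return copied;
}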
static void pipe_advance(struct iov_iter *i, size_t size)
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        size_t off = i->iov_offset, orig_sz;

        if (unlikely(i->count < size))
                if (off) /* make it relative to the beginning of buffer */
                        size += off - pipe->bufs[idx].offset;
                        buf = &pipe->bufs[idx];
                        if (size <= buf->len)
                        idx = next_idx(idx, pipe);
                off = i->iov_offset = buf->offset + size;
                idx = next_idx(idx, pipe);
                int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
                /* [curbuf,unused) is in use. Free [idx,unused) */
                while (idx != unused) {
                        pipe_buf_release(pipe, &pipe->bufs[idx]);
                        idx = next_idx(idx, pipe);
void iov_iter_advance(struct iov_iter *i, size_t size)
        if (unlikely(i->type & ITER_PIPE)) {
                pipe_advance(i, size);
        iterate_and_advance(i, size, v, 0, 0, 0)
EXPORT_SYMBOL(iov_iter_advance);
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE))
                return i->count;        // it is a silly place, anyway
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
                return min(i->count, i->iov->iov_len - i->iov_offset);
EXPORT_SYMBOL(iov_iter_single_seg_count);
void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
        BUG_ON(!(direction & ITER_KVEC));
        i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_kvec);
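
/*
 * Sketch (hypothetical helper): kernel-internal buffers are described with
 * ITER_KVEC so the copy helpers bypass the user-access machinery.  WRITE is
 * used here because the iterator acts as the data source, following the
 * convention of in-kernel senders.
 */
static __maybe_unused size_t example_gather_kvec(void *dst, struct kvec *kv,
                                                 unsigned long nr, size_t len)
{
        struct iov_iter iter;

        iov_iter_kvec(&iter, WRITE | ITER_KVEC, kv, nr, len);
        return copy_from_iter(dst, len, &iter);
}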
void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
        BUG_ON(!(direction & ITER_BVEC));
        i->nr_segs = nr_segs;
EXPORT_SYMBOL(iov_iter_bvec);
void iov_iter_pipe(struct iov_iter *i, int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
        BUG_ON(direction != ITER_PIPE);
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
EXPORT_SYMBOL(iov_iter_pipe);
unsigned long iov_iter_alignment(const struct iov_iter *i)
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
EXPORT_SYMBOL(iov_iter_alignment);
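
/*
 * Sketch (hypothetical helper): a direct-I/O path would reject requests
 * whose memory and file offsets are not block aligned.  The 511 mask stands
 * in for (logical block size - 1).
 */
static __maybe_unused bool example_dio_aligned(const struct iov_iter *i, loff_t pos)
{
        return !((iov_iter_alignment(i) | pos) & 511);
}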
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
        unsigned long res = 0;
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
EXPORT_SYMBOL(iov_iter_gap_alignment);
static inline size_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize, struct page **pages,
                                int idx, size_t *start)
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);

                get_page(*pages++ = pipe->bufs[idx].page);
                idx = next_idx(idx, pipe);
static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
        if (maxsize > i->count)
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                return (res == n ? len : res * PAGE_SIZE) - *start;
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages);
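
/*
 * Sketch of a direct-I/O style caller (hypothetical helper): pin at most one
 * page worth of the iterator, use it, and drop the reference again.  The
 * return value is the number of bytes covered, starting *start bytes into
 * the returned page.
 */
static __maybe_unused ssize_t example_pin_first_page(struct iov_iter *i)
{
        struct page *page;
        size_t start;
        ssize_t n = iov_iter_get_pages(i, &page, PAGE_SIZE, 1, &start);

        if (n <= 0)
                return n;
        /* ... set up DMA against "page" here ... */
        put_page(page);
        return n;
}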
static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}
static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        n = npages * PAGE_SIZE - *start;

        npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
        p = get_pages_array(npages);
        n = __pipe_get_pages(i, maxsize, p, idx, start);
ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
        if (maxsize > i->count)
        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                return (res == n ? len : res * PAGE_SIZE) - *start;
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                get_page(*p = v.bv_page);
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                        struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        iterate_and_advance(i, bytes, v, ({
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);
                err ? v.iov_len : 0;
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_from_iter);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
                        struct iov_iter *i)
        if (unlikely(i->type & ITER_PIPE)) {
        if (unlikely(i->count < bytes))
        iterate_all_kinds(i, bytes, v, ({
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);
        iov_iter_advance(i, bytes);
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                        struct iov_iter *i)
        const char *from = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);     /* for now */
        iterate_and_advance(i, bytes, v, ({
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_len, 0, &err);
                sum = csum_block_add(sum, next, off);
                err ? v.iov_len : 0;
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                sum = csum_block_add(sum, next, off);
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                sum = csum_block_add(sum, next, off);
EXPORT_SYMBOL(csum_and_copy_to_iter);
int iov_iter_npages(const struct iov_iter *i, int maxpages)
        size_t size = i->count;

        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;

                data_start(i, &idx, &off);
                /* some of this one + all after this one */
                npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
                if (npages >= maxpages)
        } else iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                if (npages >= maxpages)
                if (npages >= maxpages)
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                if (npages >= maxpages)
EXPORT_SYMBOL(iov_iter_npages);
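
/*
 * Sketch (hypothetical helper): the block layer sizes its bios by asking how
 * many pages the iterator could touch, capped at its per-bio limit; 256 is
 * used here purely as an illustrative cap.
 */
static __maybe_unused int example_bio_segments(const struct iov_iter *i)
{
        return iov_iter_npages(i, 256);
}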
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
        if (unlikely(new->type & ITER_PIPE)) {
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                new->nr_segs * sizeof(struct bio_vec),
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                new->nr_segs * sizeof(struct iovec),
EXPORT_SYMBOL(dup_iter);
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
EXPORT_SYMBOL(import_iovec);
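
/*
 * Sketch of the calling convention documented above (the helper itself is
 * hypothetical): supply a small on-stack array, let import_iovec() decide
 * whether to substitute a heap copy, and kfree(*iov) unconditionally.
 */
static __maybe_unused ssize_t example_import(const struct iovec __user *uvec,
                                             unsigned nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;
        ret = iov_iter_count(&iter);
        /* ... do the I/O against &iter while iov stays live ... */
        kfree(iov);
        return ret;
}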
#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
#endif /* CONFIG_COMPAT */
int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
        if (len > MAX_RW_COUNT)
        if (unlikely(!access_ok(!rw, buf, len)))

        iov->iov_base = buf;
        iov_iter_init(i, rw, iov, 1, len);
EXPORT_SYMBOL(import_single_range);
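
/*
 * Sketch (hypothetical helper): the single-buffer convenience path, set up
 * the way a simple write(2)-style handler would consume one user buffer.
 */
static __maybe_unused ssize_t example_read_single(void *dst, void __user *buf,
                                                  size_t len)
{
        struct iovec iov;
        struct iov_iter iter;
        int ret = import_single_range(WRITE, buf, len, &iov, &iter);

        if (ret)
                return ret;
        return copy_from_iter(dst, len, &iter);
}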