2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
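/*
 * Illustrative userspace sketch (not part of this file; fd names are
 * hypothetical): the classic splice pattern moves data from a file to a
 * socket through a pipe, without copying it through user memory:
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	ssize_t n = splice(file_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MOVE | SPLICE_F_MORE);
 */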
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	mapping = page_mapping(page);

	WARN_ON(!PageUptodate(page));
	/*
	 * At least for ext2 with nobh option, we need to wait on
	 * writeback completing on this page, since we'll remove it
	 * from the pagecache. Otherwise truncate won't wait on the
	 * page, allowing the disk blocks to be reused by someone else
	 * before we actually wrote our data to them. fs corruption
	 * ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, GFP_KERNEL);
	/*
	 * If we succeeded in removing the mapping, set LRU flag
	 * and return good.
	 */
	if (remove_mapping(mapping, page)) {
		buf->flags |= PIPE_BUF_FLAG_LRU;

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	if (!PageUptodate(page)) {
		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {

		/*
		 * Page is ok after all, we are done.
		 */
static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe: pipe to fill
 * @spd: data to fill
 *
 * @spd contains a map of pages and len/offset tuples, along with
 * the struct pipe_buf_operations associated with these pages. This
 * function will link that data to the pipe.
 */
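/*
 * Illustrative sketch (assumed, simplified): a typical caller builds a
 * pages[]/partial[] map, wraps it in a splice_pipe_desc and hands it to
 * splice_to_pipe(). See __generic_file_splice_read() below for the real
 * thing; 'pages', 'partial', 'nr' and 'flags' here are placeholders.
 *
 *	struct splice_pipe_desc spd = {
 *		.pages = pages,
 *		.partial = partial,
 *		.nr_pages = nr,
 *		.flags = flags,
 *		.ops = &page_cache_pipe_buf_ops,
 *	};
 *
 *	return splice_to_pipe(pipe, &spd);
 */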
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	mutex_lock(&pipe->inode->i_mutex);
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;

			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;
			if (!--spd->nr_pages)

			if (pipe->nrbufs < PIPE_BUFFERS)

		if (spd->flags & SPLICE_F_NONBLOCK) {

		if (signal_pending(current)) {

			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	mutex_unlock(&pipe->inode->i_mutex);

		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);

	while (page_nr < spd_pages)
		page_cache_release(spd->pages[page_nr++]);
static ssize_t
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	pgoff_t index, end_index;
	struct splice_pipe_desc spd = {
		.ops = &page_cache_pipe_buf_ops,
	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;
	/*
	 * Don't try to second-guess the read-ahead logic, call into
	 * page_cache_readahead() like the page cache reads would do.
	 */
	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Look up the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest and fill in the holes.
	 */
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);

			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);

			error = add_to_page_cache_lru(page, mapping, index,
						      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)

			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */

		pages[spd.nr_pages++] = page;
	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];
		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * managed.
			 */
			if (!page->mapping) {

			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	return splice_to_pipe(pipe, &spd);
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in: file to splice from
 * @ppos: position in @in
 * @pipe: pipe to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe. Can be
 * used as long as the address_space operations for the source implement
 * a readpage() hook.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))

	left = isize - *ppos;
	if (unlikely(left < len))

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
		if (flags & SPLICE_F_NONBLOCK) {

EXPORT_SYMBOL(generic_file_splice_read);
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;

	ret = buf->ops->confirm(pipe, buf);

	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, buf->offset,
				   sd->len, &pos, more);
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
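/*
 * Illustrative userspace view of the above (fd names hypothetical):
 * asking the kernel to move rather than copy the pipe pages into the
 * destination file's page cache:
 *
 *	loff_t off = 0;
 *	splice(pfd[0], NULL, out_fd, &off, 65536, SPLICE_F_MOVE);
 *
 * SPLICE_F_MOVE is only a hint; as described above, the kernel falls
 * back to copying whenever the page cannot be moved.
 */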
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->confirm(pipe, buf);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	page = find_lock_page(mapping, index);

		page = page_cache_alloc_cold(mapping);

		/*
		 * This will also lock the page
		 */
		ret = add_to_page_cache_lru(page, mapping, index,
					    GFP_KERNEL);
	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);

		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)

		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);
	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);

		/*
		 * Partial write has happened, so 'ret' is already initialized
		 * to the number of bytes written; there is nothing more we
		 * have to do here.
		 */

	/*
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	 */
	mark_page_accessed(page);
	page_cache_release(page);
/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe: pipe to splice from
 * @sd: information to @actor
 * @actor: handler that splices the data
 *
 * This function does little more than loop over the pipe and call
 * @actor to do the actual moving of a single struct pipe_buffer to
 * the desired destination. See pipe_to_file, pipe_to_sendpage, or
 * pipe_to_user.
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret, do_wakeup, err;
		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
		const struct pipe_buf_operations *ops = buf->ops;

			if (sd->len > sd->total_len)
				sd->len = sd->total_len;

			err = actor(pipe, buf, sd);

				if (!ret && err != -ENODATA)

			sd->total_len -= err;

				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
		if (!pipe->waiting_writers) {

		if (sd->flags & SPLICE_F_NONBLOCK) {

		if (signal_pending(current)) {

			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);

		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
EXPORT_SYMBOL(__splice_from_pipe);
/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe: pipe to splice from
 * @out: file to splice to
 * @ppos: position in @out
 * @len: how many bytes to splice
 * @flags: splice modifier flags
 * @actor: handler that splices the data
 *
 * See __splice_from_pipe. This function locks the input and output inodes,
 * otherwise it's identical to __splice_from_pipe().
 */
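/*
 * Illustrative sketch (assumed): a ->splice_write implementation with no
 * special locking needs can simply wrap this helper with a suitable
 * actor, much like generic_file_splice_write() below does:
 *
 *	static ssize_t my_splice_write(struct pipe_inode_info *pipe,
 *				       struct file *out, loff_t *ppos,
 *				       size_t len, unsigned int flags)
 *	{
 *		return splice_from_pipe(pipe, out, ppos, len, flags,
 *					pipe_to_file);
 *	}
 *
 * 'my_splice_write' is a hypothetical name for illustration.
 */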
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	struct inode *inode = out->f_mapping->host;
	struct splice_desc sd = {
	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, &sd, actor);
	inode_double_unlock(inode, pipe->inode);
/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
	err = remove_suid(out->f_path.dentry);

	ret = __splice_from_pipe(pipe, &sd, pipe_to_file);

		unsigned long nr_pages;

		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
EXPORT_SYMBOL(generic_file_splice_write_nolock);
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe: pipe info
 * @out: file to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	err = should_remove_suid(out->f_path.dentry);

		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_path.dentry, err);
		mutex_unlock(&inode->i_mutex);

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);

		unsigned long nr_pages;

		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe: pipe to splice from
 * @out: socket to write to
 * @ppos: position in @out
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	if (unlikely(!out->f_op || !out->f_op->splice_write))

	if (unlikely(!(out->f_mode & FMODE_WRITE)))

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	if (unlikely(!in->f_op || !in->f_op->splice_read))

	if (unlikely(!(in->f_mode & FMODE_READ)))

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in: file to splice from
 * @sd: actor information on where to splice to
 * @actor: handles the data splicing
 *
 * This is a special case helper to splice directly between two
 * points, without requiring an explicit pipe. Internally an allocated
 * pipe is cached in the process, and reused during the lifetime of
 * that pipe.
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	/*
	 * We require the input to be a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
	/*
	 * Neither in nor out is a pipe, set up an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that.
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */

		current->splice_pipe = pipe;
	len = sd->total_len;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));
		ret = do_splice_to(in, &sd->pos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))

		sd->total_len = read_len;
		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret < 0))

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

			buf->ops->release(pipe, buf);

	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */

EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
/**
 * do_splice_direct - splices data directly between two files
 * @in: file to splice from
 * @ppos: input file offset
 * @out: file to splice to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * For use by do_sendfile(). splice can easily emulate sendfile, but
 * doing it in the application would incur an extra system call
 * (splice in + splice out, as compared to just sendfile()). So this helper
 * can splice directly through a process-private pipe.
 */
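/*
 * For illustration only: the userspace-visible equivalent of this helper
 * is two splice() calls through an explicit pipe (fd and variable names
 * hypothetical):
 *
 *	loff_t pos = 0;
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(in_fd, &pos, pfd[1], NULL, len, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, len, 0);
 *
 * do_splice_direct() avoids the extra system call and file descriptors
 * by reusing the process-private pipe in current->splice_pipe.
 */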
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct splice_desc sd = {

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;

	pipe = pipe_info(in->f_path.dentry->d_inode);
		if (out->f_op->llseek == no_llseek)

			if (copy_from_user(&offset, off_out, sizeof(loff_t)))

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
	pipe = pipe_info(out->f_path.dentry->d_inode);

		if (in->f_op->llseek == no_llseek)

			if (copy_from_user(&offset, off_in, sizeof(loff_t)))

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;
	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

		unsigned long off, npages;
		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))

		error = get_user(len, &iov->iov_len);
		if (unlikely(error))

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!base))
		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		if (aligned && (off || len & ~PAGE_MASK))

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;
		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;
		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)

	up_read(&current->mm->mmap_sem);
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	ret = buf->ops->confirm(pipe, buf);
	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
					      sd->len);
		buf->ops->unmap(pipe, buf, src);
	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))

	sd->u.userptr += ret;
	buf->ops->unmap(pipe, buf, src);
/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;

	pipe = pipe_info(file->f_path.dentry->d_inode);

	mutex_lock(&pipe->inode->i_mutex);
		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))

		error = get_user(len, &iov->iov_len);
		if (unlikely(error))

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!base)) {

		sd.u.userptr = base;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);

	mutex_unlock(&pipe->inode->i_mutex);
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.ops = &user_page_pipe_buf_ops,

	pipe = pipe_info(file->f_path.dentry->d_inode);

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (they
 *	  impose restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 */
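/*
 * Illustrative userspace sketch (buffer and fd names hypothetical):
 *
 *	struct iovec iov = {
 *		.iov_base = buf,
 *		.iov_len = buflen,
 *	};
 *
 *	vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);	gift user memory to pipe
 *	vmsplice(pfd[0], &iov, 1, 0);			copy pipe to user memory
 *
 * With SPLICE_F_GIFT the caller must not touch the memory afterwards, as
 * the pages may be stolen further down the chain.
 */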
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	if (unlikely(nr_segs > UIO_MAXIOV))

	else if (unlikely(!nr_segs))

	file = fget_light(fd, &fput);

		if (file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(file, iov, nr_segs, flags);
		else if (file->f_mode & FMODE_READ)
			error = vmsplice_to_user(file, iov, nr_segs, flags);

		fput_light(file, fput);
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	struct file *in, *out;
	int fput_in, fput_out;

	in = fget_light(fd_in, &fput_in);

		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);

				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);

				fput_light(out, fput_out);

		fput_light(in, fput_in);
/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */

	mutex_lock(&pipe->inode->i_mutex);
	while (!pipe->nrbufs) {
		if (signal_pending(current)) {

		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {

	mutex_unlock(&pipe->inode->i_mutex);
/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)

	mutex_lock(&pipe->inode->i_mutex);
	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);

		if (flags & SPLICE_F_NONBLOCK) {

		if (signal_pending(current)) {

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;

	mutex_unlock(&pipe->inode->i_mutex);
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;
	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
		/*
		 * If we have iterated all input buffers or run out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);
		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)

	inode_double_unlock(ipipe->inode, opipe->inode);
	/*
	 * If we put data in the output pipe, wake up any potential readers.
	 */
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
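/*
 * Illustrative userspace sketch (fd names hypothetical): duplicate up to
 * 64k from one pipe to another, then drain the input with splice():
 *
 *	ssize_t n = tee(in_pfd[0], out_pfd[1], 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(in_pfd[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */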
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
			ret = link_opipe_prep(opipe, flags);
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	in = fget_light(fdin, &fput_in);

		if (in->f_mode & FMODE_READ) {
			struct file *out = fget_light(fdout, &fput_out);

				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);

		fput_light(in, fput_in);