2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
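/*
 * A minimal userspace sketch (not part of this file, and hypothetical in
 * every name) of what this machinery provides: copying a file to stdout
 * through a pipe with the splice(2) syscall. Error handling is trimmed.
 *
 *	int fd = open("input", O_RDONLY);
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	for (;;) {
 *		// file -> pipe
 *		ssize_t n = splice(fd, NULL, pfd[1], NULL, 65536, 0);
 *		if (n <= 0)
 *			break;
 *		// pipe -> stdout, draining what we just queued
 *		while (n > 0) {
 *			ssize_t m = splice(pfd[0], NULL, STDOUT_FILENO,
 *					   NULL, n, 0);
 *			if (m <= 0)
 *				break;
 *			n -= m;
 *		}
 *	}
 */
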
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page))
			try_to_release_page(page, GFP_KERNEL);

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
#if 0 // mask by Victor Yu. 02-12-2007
		if (!page->mapping) {
#else
		if (!page->u.xx.mapping) {
#endif
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = page_cache_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.pin = generic_pipe_buf_pin,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}

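/*
 * A minimal sketch of how a caller feeds splice_to_pipe(): fill a
 * splice_pipe_desc with page pointers, per-page offset/len pairs and the
 * pipe_buf_operations the pipe side should use. Hypothetical fragment,
 * assuming the caller already holds a reference on the page:
 *
 *	struct page *pages[PIPE_BUFFERS];
 *	struct partial_page partial[PIPE_BUFFERS];
 *	struct splice_pipe_desc spd = {
 *		.pages = pages,
 *		.partial = partial,
 *		.nr_pages = 1,
 *		.flags = 0,
 *		.ops = &page_cache_pipe_buf_ops,
 *	};
 *
 *	pages[0] = some_referenced_page;
 *	partial[0].offset = 0;
 *	partial[0].len = PAGE_CACHE_SIZE;
 *	return splice_to_pipe(pipe, &spd);
 */
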
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 */
	if (!loff || nr_pages > 1)
		page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest.
	 */
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then dont block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
#if 0 // mask by Victor Yu. 02-12-2007
			if (!page->mapping) {
#else
			if (!page->u.xx.mapping) {
#endif
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}

			/*
			 * i_size must be checked after ->readpage().
			 */
			isize = i_size_read(mapping->host);
			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
			if (unlikely(!isize || index > end_index))
				break;

			/*
			 * if this is the last page, see if we need to shrink
			 * the length and stop
			 */
			if (end_index == index) {
				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
				if (total_len + loff > isize)
					break;
				/*
				 * force quit after adding this page
				 */
				len = this_len;
				this_len = min(this_len, loff);
			}
		}

fill_it:
		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

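/*
 * A filesystem opts into splice by pointing its file_operations at the
 * generic helpers; a hedged sketch for a hypothetical fs that already
 * uses the generic page cache paths:
 *
 *	static struct file_operations example_fops = {
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *		// plus the usual read/write/mmap methods
 *	};
 */
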
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	int ret, more;

	ret = buf->ops->pin(pipe, buf);
	if (!ret) {
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->pin(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
	 * page.
	 */
	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the
		 * pagecache and we can reuse it. The page will also be
		 * locked on successful return.
		 */
		if (buf->ops->steal(pipe, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, GFP_KERNEL)) {
			unlock_page(page);
			goto find_page;
		}

		page_cache_get(page);

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_ret;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    GFP_KERNEL);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
#if 0 // mask by Victor Yu. 02-12-2007
					if (!page->mapping) {
#else
					if (!page->u.xx.mapping) {
#endif
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				SetPageUptodate(page);
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (!ret) {
		/*
		 * Return the number of bytes written and mark page as
		 * accessed, we are now done!
		 */
		ret = this_len;
		mark_page_accessed(page);
		balance_dirty_pages_ratelimited(mapping);
	} else if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	}
out:
	page_cache_release(page);
	unlock_page(page);
out_ret:
	return ret;
}

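/*
 * The move-vs-copy decision above is driven from userspace; a hedged,
 * hypothetical fragment asking the kernel to move whole pages when
 * splicing pipe data into a file (it may still fall back to copying):
 *
 *	loff_t off = 0;
 *
 *	splice(pfd[0], NULL, file_fd, &off, 65536, SPLICE_F_MOVE);
 */
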
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
static ssize_t __splice_from_pipe(struct pipe_inode_info *pipe,
				  struct file *out, loff_t *ppos, size_t len,
				  unsigned int flags, splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}

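/*
 * Any new destination only needs another actor. A minimal, hypothetical
 * sketch that discards the data, just to show the contract (pin the buf,
 * then report how many bytes were consumed):
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		int ret = buf->ops->pin(pipe, buf);
 *
 *		if (unlikely(ret))
 *			return ret;
 *		return sd->len;
 *	}
 *
 * which would then be driven by splice_from_pipe(pipe, out, ppos, len,
 * flags, pipe_to_null).
 */
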
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, out, ppos, len, flags, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}

/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file. The caller is responsible
 * for acquiring i_mutex on both inodes.
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = remove_suid(out->f_dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write_nolock);

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = should_remove_suid(out->f_dentry);
	if (unlikely(err)) {
		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_dentry, err);
		mutex_unlock(&inode->i_mutex);
		if (err)
			return err;
	}

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

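/*
 * Userspace reaches pipe_to_sendpage() by splicing into a socket; a
 * hedged, hypothetical fragment (error handling omitted):
 *
 *	// queue file data into the pipe, then push it to a TCP socket
 *	splice(file_fd, NULL, pfd[1], NULL, 65536, 0);
 *	splice(pfd[0], NULL, sock_fd, NULL, 65536, SPLICE_F_MORE);
 */
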
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for e.g. socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = *ppos;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

EXPORT_SYMBOL(do_splice_direct);

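/*
 * do_splice_direct() gives sendfile-style file-to-file transfers without
 * a user-visible pipe: it loops do_splice_to() + do_splice_from() over one
 * process-private pipe (current->splice_pipe), instead of the two-syscall
 * userspace equivalent sketched here (hypothetical fragment):
 *
 *	splice(in_fd, NULL, pfd[1], NULL, chunk, 0);	// file -> pipe
 *	splice(pfd[0], NULL, out_fd, &off, chunk, 0);	// pipe -> file
 */
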
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
#if 0 // mask by Victor Yu. 02-12-2007
		return inode->i_pipe;
#else
		return inode->u.i_pipe;
#endif

	return NULL;
}

/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = pipe_info(in->f_dentry->d_inode);
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = pipe_info(out->f_dentry->d_inode);
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}

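/*
 * Layout sketch for the helper above, assuming 4k pages: a single iovec
 * { .iov_base = addr, .iov_len = 6000 } whose base sits 512 bytes into a
 * page maps to two entries:
 *
 *	pages[0]/partial[0]: offset = 512, len = 3584
 *	pages[1]/partial[1]: offset = 0,   len = 2416
 */
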
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user memory and fill it into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  imposes restrictions on both ends of the pipe).
 *
 * Alas, it isn't here.
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	pipe = pipe_info(file->f_dentry->d_inode);
	if (!pipe)
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}

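/*
 * Userspace sketch of the above (hypothetical fragment): gift a buffer of
 * whole pages to the pipe, so a later SPLICE_F_MOVE consumer may steal the
 * pages instead of copying them:
 *
 *	struct iovec iov = {
 *		.iov_base = page_aligned_buf,
 *		.iov_len = 4096,
 *	};
 *
 *	vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */
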
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}

/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_dentry->d_inode);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = link_opipe_prep(opipe, flags);
			if (!ret) {
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
					ret = -EAGAIN;
			}
		}
	}

	return ret;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}

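/*
 * Userspace sketch of tee(2) as implemented above (hypothetical fragment):
 * duplicate whatever sits in the stdin pipe into a second pipe without
 * consuming it, then forward the original data with splice(2):
 *
 *	ssize_t n = tee(STDIN_FILENO, pfd[1], 65536, 0);
 *
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, STDOUT_FILENO, NULL, n, 0);
 */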