2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
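
/*
 * Illustrative only (userspace, not part of this file): the splice(2)
 * operation described in the header comment in its simplest form, moving
 * up to 64k from an open file 'fd' into the write side of pipe 'pfd'
 * without passing the data through user memory:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	ssize_t n = splice(fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 * A negative return is an error, 0 means end of input, and a positive
 * return is the number of bytes now buffered in the pipe.
 */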
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page))
			try_to_release_page(page, GFP_KERNEL);

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
/*
 * Check whether the contents of @buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}
static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = page_cache_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 *
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode) {
		mutex_unlock(&pipe->inode->i_mutex);

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
	}

	while (page_nr < spd_pages)
		spd->spd_release(spd, page_nr++);

	return ret;
}
static void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
{
	page_cache_release(spd->pages[i]);
}
static ssize_t
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Don't try to 2nd guess the read-ahead logic, call into
	 * page_cache_readahead() like the page cache reads would do.
	 */
	page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * allocate the rest and fill in the holes.
	 */
	error = 0;
	index += spd.nr_pages;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * Make sure the read-ahead engine is notified
			 * about this failure.
			 */
			handle_ra_miss(mapping, &in->f_ra, index);

			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}

	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then dont block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))
					break;
			} else
				lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * lets just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 *
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t ret;
	loff_t isize, left;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
	if (ret > 0)
		*ppos += ret;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);
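
/*
 * Illustrative only: how a filesystem typically wires these generic helpers
 * up. The filesystem name and the read/write methods below are just an
 * example, not taken from this file; the point is that splice_read/splice_write
 * can simply point at the generic implementations, provided the
 * address_space operations implement ->readpage() (and the prepare/commit
 * write hooks for the write side):
 *
 *	const struct file_operations examplefs_file_operations = {
 *		.read		= do_sync_read,
 *		.write		= do_sync_write,
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */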
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int ret, more;

	ret = buf->ops->confirm(pipe, buf);
	if (!ret) {
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

find_page:
	page = find_lock_page(mapping, index);
	if (!page) {
		ret = -ENOMEM;
		page = page_cache_alloc_cold(mapping);
		if (unlikely(!page))
			goto out_ret;

		/*
		 * This will also lock the page
		 */
		ret = add_to_page_cache_lru(page, mapping, index,
					    GFP_KERNEL);
		if (unlikely(ret))
			goto out_release;
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (unlikely(ret)) {
		loff_t isize = i_size_read(mapping->host);

		if (ret != AOP_TRUNCATED_PAGE)
			unlock_page(page);
		page_cache_release(page);
		if (ret == AOP_TRUNCATED_PAGE)
			goto find_page;

		/*
		 * prepare_write() may have instantiated a few blocks
		 * outside i_size. Trim these off again.
		 */
		if (sd->pos + this_len > isize)
			vmtruncate(mapping->host, isize);

		goto out_ret;
	}

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret) {
		if (ret == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			goto find_page;
		}
		if (ret < 0)
			goto out;
		/*
		 * A partial write has happened, so 'ret' is already
		 * initialized to the number of bytes written; there is
		 * nothing more we have to do here.
		 */
	} else
		ret = this_len;
	/*
	 * Return the number of bytes written and mark page as
	 * accessed, we are now done!
	 */
	mark_page_accessed(page);
out:
	unlock_page(page);
out_release:
	page_cache_release(page);
out_ret:
	return ret;
}
/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 *
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret, do_wakeup, err;

	ret = 0;
	do_wakeup = 0;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			const struct pipe_buf_operations *ops = buf->ops;

			sd->len = buf->len;
			if (sd->len > sd->total_len)
				sd->len = sd->total_len;

			err = actor(pipe, buf, sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd->len -= err;
			sd->pos += err;
			sd->total_len -= err;
			if (sd->len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd->total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (sd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}

EXPORT_SYMBOL(__splice_from_pipe);
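
/*
 * Illustrative only: the shape of a custom actor usable with
 * __splice_from_pipe()/splice_from_pipe(). This hypothetical actor just
 * discards the bytes it is offered; a real one would consume the data in
 * buf->page at buf->offset for sd->len bytes and return how much it took.
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		int ret;
 *
 *		ret = buf->ops->confirm(pipe, buf);
 *		if (unlikely(ret))
 *			return ret;
 *
 *		return sd->len;
 *	}
 */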
/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to splice to
 * @ppos:	position in @out
 * @len:	how many bytes to splice
 * @flags:	splice modifier flags
 * @actor:	handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the input and output inodes,
 *    otherwise it's identical to __splice_from_pipe().
 *
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, &sd, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}
/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file. The caller is responsible
 *    for acquiring i_mutex on both inodes.
 *
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	ssize_t ret;
	int err;

	err = remove_suid(out->f_path.dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write_nolock);
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	int err;

	err = should_remove_suid(out->f_path.dentry);
	if (unlikely(err)) {
		mutex_lock(&inode->i_mutex);
		err = __remove_suid(out->f_path.dentry, err);
		mutex_unlock(&inode->i_mutex);
		if (err)
			return err;
	}

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(out, MAY_WRITE);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(in, MAY_READ);
	if (unlikely(ret < 0))
		return ret;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 *
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that.
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos, prev_pos = pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0)) {
			sd->pos = prev_pos;
			goto out_release;
		}

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len) {
			sd->pos = prev_pos + ret;
			goto out_release;
		}
	}

done:
	pipe->nrbufs = pipe->curbuf = 0;
	file_accessed(in);
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}

	if (!bytes)
		bytes = ret;

	goto done;
}

EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
}
/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 *
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos = sd.pos;

	return ret;
}
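
/*
 * Illustrative only (userspace): the "splice in + splice out" pattern the
 * comment above refers to, copying 'len' bytes between two file descriptors
 * through a scratch pipe. do_splice_direct() avoids the extra system calls
 * by keeping a per-task pipe inside the kernel. A minimal sketch:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static ssize_t copy_via_pipe(int in_fd, int out_fd, size_t len)
 *	{
 *		int pfd[2];
 *		ssize_t total = 0;
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *
 *		while (len) {
 *			ssize_t n = splice(in_fd, NULL, pfd[1], NULL, len, 0);
 *			if (n <= 0)
 *				break;
 *			while (n) {
 *				ssize_t m = splice(pfd[0], NULL, out_fd,
 *						   NULL, n, 0);
 *				if (m <= 0)
 *					goto out;
 *				n -= m;
 *				len -= m;
 *				total += m;
 *			}
 *		}
 *	out:
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return total;
 *	}
 */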
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;

	return NULL;
}
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = pipe_info(in->f_path.dentry->d_inode);
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = pipe_info(out->f_path.dentry->d_inode);
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
/*
 * Do a copy-from-user while holding the mmap_semaphore for reading, in a
 * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
 * for writing) and page faulting on the user memory pointed to by src.
 * This assumes that we will very rarely hit the partial != 0 path, or this
 * will not be a win.
 */
static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
{
	int partial;

	if (!access_ok(VERIFY_READ, src, n))
		return -EFAULT;

	pagefault_disable();
	partial = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	/*
	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
	 */
	if (unlikely(partial)) {
		up_read(&current->mm->mmap_sem);
		partial = copy_from_user(dst, src, n);
		down_read(&current->mm->mmap_sem);
	}

	return partial;
}
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (!access_ok(VERIFY_READ, base, len))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
					      sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;
	return ret;
}
/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;
			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (!ret)
		ret = error;

	return ret;
}
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}
/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (they
 *	  impose restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 */
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(file, iov, nr_segs, flags);
		else if (file->f_mode & FMODE_READ)
			error = vmsplice_to_user(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
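
/*
 * Illustrative only (userspace): gifting a page-aligned user buffer to a
 * pipe with vmsplice(2), which ends up in vmsplice_to_pipe() above. 'pfd'
 * is assumed to be a pipe created with pipe(2), and 'buf' a page-aligned
 * allocation the caller won't touch again.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	struct iovec iov = {
 *		.iov_base	= buf,
 *		.iov_len	= 65536,
 *	};
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 *
 * With SPLICE_F_GIFT the pages may later be stolen into the page cache by
 * a subsequent splice(2) with SPLICE_F_MOVE instead of being copied.
 */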
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyways, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	/*
	 * return EAGAIN if we have the potential of some data in the
	 * future, otherwise just return 0
	 */
	if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
		ret = -EAGAIN;

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = link_opipe_prep(opipe, flags);
			if (!ret)
				ret = link_pipe(ipipe, opipe, len, flags);
		}
	}

	return ret;
}
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}
		fput_light(in, fput_in);
	}

	return error;
}
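
/*
 * Illustrative only (userspace): tee(2) as implemented above duplicates
 * pipe contents without consuming them, so the same data can still be
 * spliced or read elsewhere afterwards. A minimal sketch, assuming 'in'
 * and 'out' are both pipe file descriptors:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	ssize_t n = tee(in, out, 65536, SPLICE_F_NONBLOCK);
 *
 * 'in' keeps its data; a later splice(in, ...) or read() still sees it.
 */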