2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files, network, direct splicing, etc and
13 * fixing lots of bugs.
15 * Copyright (C) 2005-2006 Jens Axboe <axboe@kernel.dk>
16 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
17 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
#include <linux/security.h>
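
/*
 * A minimal, hypothetical userspace sketch (not part of this file) of the
 * syscall implemented below: draining a file through a pipe to a socket
 * without copying through user memory. The descriptors "in_fd" and
 * "sock_fd" and the 64k chunk size are illustrative assumptions; error
 * handling is abbreviated.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int file_to_socket(int in_fd, int sock_fd, size_t len)
 *	{
 *		int pfd[2];
 *
 *		if (pipe(pfd) < 0)
 *			return -1;
 *		while (len) {
 *			ssize_t n = splice(in_fd, NULL, pfd[1], NULL,
 *					   len < 65536 ? len : 65536,
 *					   SPLICE_F_MORE);
 *			if (n <= 0)
 *				break;
 *			while (n) {
 *				ssize_t m = splice(pfd[0], NULL, sock_fd,
 *						   NULL, n, SPLICE_F_MORE);
 *				if (m <= 0)
 *					return -1;
 *				n -= m;
 *				len -= m;
 *			}
 *		}
 *		close(pfd[0]);
 *		close(pfd[1]);
 *		return len ? -1 : 0;
 *	}
 */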
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping;

	lock_page(page);

	mapping = page_mapping(page);
	if (mapping) {
		WARN_ON(!PageUptodate(page));

		/*
		 * At least for ext2 with nobh option, we need to wait on
		 * writeback completing on this page, since we'll remove it
		 * from the pagecache. Otherwise truncate won't wait on the
		 * page, allowing the disk blocks to be reused by someone else
		 * before we actually wrote our data to them. fs corruption
		 * ensues.
		 */
		wait_on_page_writeback(page);

		if (PagePrivate(page))
			try_to_release_page(page, GFP_KERNEL);

		/*
		 * If we succeeded in removing the mapping, set LRU flag
		 * and return good.
		 */
		if (remove_mapping(mapping, page)) {
			buf->flags |= PIPE_BUF_FLAG_LRU;
			return 0;
		}
	}

	/*
	 * Raced with truncate or failed to remove page from current
	 * address space, unlock and return failure.
	 */
	unlock_page(page);
	return 1;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}
/*
 * Check whether the contents of buf are OK to access. Since the content
 * is a page cache page, IO may be in flight.
 */
static int page_cache_pipe_buf_confirm(struct pipe_inode_info *pipe,
				       struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, we are done.
		 */
		unlock_page(page);
	}

	return 0;
error:
	unlock_page(page);
	return err;
}
static const struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.map		= generic_pipe_buf_map,
	.unmap		= generic_pipe_buf_unmap,
	.confirm	= page_cache_pipe_buf_confirm,
	.release	= page_cache_pipe_buf_release,
	.steal		= page_cache_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};
static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
		return 1;

	buf->flags |= PIPE_BUF_FLAG_LRU;
	return generic_pipe_buf_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
	.map		= generic_pipe_buf_map,
	.unmap		= generic_pipe_buf_unmap,
	.confirm	= generic_pipe_buf_confirm,
	.release	= page_cache_pipe_buf_release,
	.steal		= user_page_pipe_buf_steal,
	.get		= generic_pipe_buf_get,
};
/**
 * splice_to_pipe - fill passed data into a pipe
 * @pipe:	pipe to fill
 * @spd:	data to fill
 *
 * Description:
 *    @spd contains a map of pages and len/offset tuples, along with
 *    the struct pipe_buf_operations associated with these pages. This
 *    function will link that data to the pipe.
 */
ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	unsigned int spd_pages = spd->nr_pages;
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->private = spd->partial[page_nr].private;
			buf->ops = spd->ops;
			if (spd->flags & SPLICE_F_GIFT)
				buf->flags |= PIPE_BUF_FLAG_GIFT;

			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode) {
		mutex_unlock(&pipe->inode->i_mutex);

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		}
	}

	while (page_nr < spd_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
static ssize_t
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.flags		= flags,
		.ops		= &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, (unsigned)PIPE_BUFFERS);

	/*
	 * Lookup the (hopefully) full range of pages we need.
	 */
	spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);
	index += spd.nr_pages;

	/*
	 * If find_get_pages_contig() returned fewer pages than we needed,
	 * readahead/allocate the rest and fill in the holes.
	 */
	if (spd.nr_pages < nr_pages)
		page_cache_sync_readahead(mapping, &in->f_ra, in,
					  index, req_pages - spd.nr_pages);

	error = 0;
	while (spd.nr_pages < nr_pages) {
		/*
		 * Page could be there, find_get_pages_contig() breaks on
		 * the first hole.
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one.
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      GFP_KERNEL);
			if (unlikely(error)) {
				page_cache_release(page);
				if (error == -EEXIST)
					continue;
				break;
			}
			/*
			 * add_to_page_cache() locks the page, unlock it
			 * to avoid convoluting the logic below even more.
			 */
			unlock_page(page);
		}

		pages[spd.nr_pages++] = page;
		index++;
	}
	/*
	 * Now loop over the map and see if we need to start IO on any
	 * pages, fill in the partial map, etc.
	 */
	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;
	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = pages[page_nr];

		if (PageReadahead(page))
			page_cache_async_readahead(mapping, &in->f_ra, in,
					page, index, req_pages - page_nr);

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK) {
				if (TestSetPageLocked(page))
					break;
			} else
				lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);
			if (unlikely(error)) {
				/*
				 * We really should re-lookup the page here,
				 * but it complicates things a lot. Instead
				 * let's just do what we already stored, and
				 * we'll get it the next time we are called.
				 */
				if (error == AOP_TRUNCATED_PAGE)
					error = 0;

				break;
			}
		}
fill_it:
		/*
		 * i_size must be checked after PageUptodate.
		 */
		isize = i_size_read(mapping->host);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		/*
		 * if this is the last page, see if we need to shrink
		 * the length and stop
		 */
		if (end_index == index) {
			unsigned int plen;

			/*
			 * max good bytes in this page
			 */
			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			/*
			 * force quit after adding this page
			 */
			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		partial[page_nr].offset = loff;
		partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	/*
	 * Release any pages at the end, if we quit early. 'page_nr' is how far
	 * we got, 'nr_pages' is how many pages are in the map.
	 */
	while (page_nr < nr_pages)
		page_cache_release(pages[page_nr++]);
	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @ppos:	position in @in
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given file and fill them into a pipe. Can be
 *    used as long as the address_space operations for the source implement
 *    a readpage() hook.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;
	loff_t isize, left;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	ret = 0;
	spliced = 0;
	while (len && !spliced) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(generic_file_splice_read);
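
/*
 * Hooking the above into a filesystem is a one-liner in its
 * file_operations; a hypothetical example (other entries omitted), usable
 * by any filesystem whose address_space operations provide ->readpage:
 *
 *	static const struct file_operations foofs_file_operations = {
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */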
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *pipe,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	loff_t pos = sd->pos;
	int ret, more;

	ret = buf->ops->confirm(pipe, buf);
	if (!ret) {
		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

		ret = file->f_op->sendpage(file, buf->page, buf->offset,
					   sd->len, &pos, more);
	}

	return ret;
}
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->u.file;
	struct address_space *mapping = file->f_mapping;
	unsigned int offset, this_len;
	struct page *page;
	void *fsdata;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	ret = pagecache_write_begin(file, mapping, sd->pos, this_len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
	if (unlikely(ret))
		goto out;

	if (buf->page != page) {
		/*
		 * Careful, ->map() uses KM_USER0!
		 */
		char *src = buf->ops->map(pipe, buf, 1);
		char *dst = kmap_atomic(page, KM_USER1);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER1);
		buf->ops->unmap(pipe, buf, src);
	}

	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
				  page, fsdata);
out:
	return ret;
}
/**
 * __splice_from_pipe - splice data from a pipe to given actor
 * @pipe:	pipe to splice from
 * @sd:		information to @actor
 * @actor:	handler that splices the data
 *
 * Description:
 *    This function does little more than loop over the pipe and call
 *    @actor to do the actual moving of a single struct pipe_buffer to
 *    the desired destination. See pipe_to_file, pipe_to_sendpage, or
 *    pipe_to_user.
 */
ssize_t __splice_from_pipe(struct pipe_inode_info *pipe, struct splice_desc *sd,
			   splice_actor *actor)
{
	int ret, do_wakeup, err;

	ret = 0;
	do_wakeup = 0;

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			const struct pipe_buf_operations *ops = buf->ops;

			sd->len = buf->len;
			if (sd->len > sd->total_len)
				sd->len = sd->total_len;

			err = actor(pipe, buf, sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd->len -= err;
			sd->pos += err;
			sd->total_len -= err;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd->total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (sd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
EXPORT_SYMBOL(__splice_from_pipe);
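
/*
 * Writing a new splice output is mostly a matter of supplying another
 * actor. A hypothetical minimal actor that just consumes (discards) each
 * buffer, shown only to illustrate the contract: confirm the buffer,
 * consume up to sd->len bytes, return the number of bytes consumed or a
 * -Exxx error. "pipe_to_null" is not a real kernel function.
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		int ret = buf->ops->confirm(pipe, buf);
 *
 *		if (unlikely(ret))
 *			return ret;
 *
 *		return sd->len;
 *	}
 *
 * __splice_from_pipe() then handles all the pipe-side accounting, buffer
 * release and blocking on behalf of such an actor.
 */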
/**
 * splice_from_pipe - splice data from a pipe to a file
 * @pipe:	pipe to splice from
 * @out:	file to splice to
 * @ppos:	position in @out
 * @len:	how many bytes to splice
 * @flags:	splice modifier flags
 * @actor:	handler that splices the data
 *
 * Description:
 *    See __splice_from_pipe. This function locks the input and output inodes,
 *    otherwise it's identical to __splice_from_pipe().
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	ssize_t ret;
	struct inode *inode = out->f_mapping->host;
	struct splice_desc sd = {
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
	};

	/*
	 * The actor worker might be calling ->prepare_write and
	 * ->commit_write. Most of the time, these expect i_mutex to
	 * be held. Since this may result in an ABBA deadlock with
	 * pipe->inode, we have to order lock acquiry here.
	 */
	inode_double_lock(inode, pipe->inode);
	ret = __splice_from_pipe(pipe, &sd, actor);
	inode_double_unlock(inode, pipe->inode);

	return ret;
}
/**
 * generic_file_splice_write_nolock - generic_file_splice_write without mutexes
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file. The caller is responsible
 *    for acquiring i_mutex on both inodes.
 */
ssize_t
generic_file_splice_write_nolock(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	struct splice_desc sd = {
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
	};
	ssize_t ret;
	int err;

	err = remove_suid(out->f_path.dentry);
	if (unlikely(err))
		return err;

	ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write_nolock);
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will either move or copy pages (determined by @flags options) from
 *    the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	struct inode *inode = mapping->host;
	int killsuid, killpriv;
	ssize_t ret;
	int err = 0;

	killpriv = security_inode_need_killpriv(out->f_path.dentry);
	killsuid = should_remove_suid(out->f_path.dentry);
	if (unlikely(killsuid || killpriv)) {
		mutex_lock(&inode->i_mutex);
		if (killpriv)
			err = security_inode_killpriv(out->f_path.dentry);
		if (!err && killsuid)
			err = __remove_suid(out->f_path.dentry, killsuid);
		mutex_unlock(&inode->i_mutex);
		if (err)
			return err;
	}

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		unsigned long nr_pages;

		*ppos += ret;
		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @ppos:	position in @out
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will send @len bytes from the pipe to a network socket. No data copying
 *    is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(out, MAY_WRITE);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	ret = security_file_permission(in, MAY_READ);
	if (unlikely(ret < 0))
		return ret;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
/**
 * splice_direct_to_actor - splices data directly between two non-pipes
 * @in:		file to splice from
 * @sd:		actor information on where to splice to
 * @actor:	handles the data splicing
 *
 * Description:
 *    This is a special case helper to splice directly between two
 *    points, without requiring an explicit pipe. Internally an allocated
 *    pipe is cached in the process, and reused during the lifetime of
 *    that process.
 */
ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
			       splice_direct_actor *actor)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	umode_t i_mode;
	size_t len;
	int i, flags;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_path.dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 * pipe.
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	len = sd->total_len;
	flags = sd->flags;

	/*
	 * Don't block on output, we have to drain the direct pipe.
	 */
	sd->flags &= ~SPLICE_F_NONBLOCK;

	while (len) {
		size_t read_len;
		loff_t pos = sd->pos;

		ret = do_splice_to(in, &pos, pipe, len, flags);
		if (unlikely(ret <= 0))
			goto out_release;

		read_len = ret;
		sd->total_len = read_len;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = actor(pipe, sd);
		if (unlikely(ret <= 0))
			goto out_release;

		bytes += ret;
		len -= ret;
		sd->pos = pos;

		if (ret < read_len)
			goto out_release;
	}

	pipe->nrbufs = pipe->curbuf = 0;
	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}
EXPORT_SYMBOL(splice_direct_to_actor);
static int direct_splice_actor(struct pipe_inode_info *pipe,
			       struct splice_desc *sd)
{
	struct file *file = sd->u.file;

	return do_splice_from(pipe, file, &sd->pos, sd->total_len, sd->flags);
}
/**
 * do_splice_direct - splices data directly between two files
 * @in:		file to splice from
 * @ppos:	input file offset
 * @out:	file to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    For use by do_sendfile(). splice can easily emulate sendfile, but
 *    doing it in the application would incur an extra system call
 *    (splice in + splice out, as compared to just sendfile()). So this helper
 *    can splice directly through a process-private pipe.
 */
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct splice_desc sd = {
		.len		= len,
		.total_len	= len,
		.flags		= flags,
		.pos		= *ppos,
		.u.file		= out,
	};
	long ret;

	ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
	if (ret > 0)
		*ppos += ret;

	return ret;
}
/*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
 * pipe.
 */
static inline struct pipe_inode_info *pipe_info(struct inode *inode)
{
	if (S_ISFIFO(inode->i_mode))
		return inode->i_pipe;

	return NULL;
}
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = pipe_info(in->f_path.dentry->d_inode);
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = pipe_info(out->f_path.dentry->d_inode);
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
/*
 * Do a copy-from-user while holding the mmap_semaphore for reading, in a
 * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
 * for writing) and page faulting on the user memory pointed to by src.
 * This assumes that we will very rarely hit the partial != 0 path, or this
 * will not be a win.
 */
static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
{
	int partial;

	pagefault_disable();
	partial = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	/*
	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
	 */
	if (unlikely(partial)) {
		up_read(&current->mm->mmap_sem);
		partial = copy_from_user(dst, src, n);
		down_read(&current->mm->mmap_sem);
	}

	return partial;
}
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial, int aligned)
{
	int buffers = 0, error = 0;

	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		struct iovec entry;
		void __user *base;
		size_t len;
		int i;

		error = -EFAULT;
		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
			break;

		base = entry.iov_base;
		len = entry.iov_len;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		error = 0;
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;

		/*
		 * If asked for alignment, the offset must be zero and the
		 * length a multiple of the PAGE_SIZE.
		 */
		error = -EINVAL;
		if (aligned && (off || len & ~PAGE_MASK))
			break;

		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE - off);

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}
static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	char *src;
	int ret;

	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret))
		return ret;

	/*
	 * See if we can use the atomic maps, by prefaulting in the
	 * pages and doing an atomic copy
	 */
	if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
		src = buf->ops->map(pipe, buf, 1);
		ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
					      sd->len);
		buf->ops->unmap(pipe, buf, src);
		if (!ret) {
			ret = sd->len;
			goto out;
		}
	}

	/*
	 * No dice, use slow non-atomic map and copy
	 */
	src = buf->ops->map(pipe, buf, 0);

	ret = sd->len;
	if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
		ret = -EFAULT;

	buf->ops->unmap(pipe, buf, src);
out:
	if (ret > 0)
		sd->u.userptr += ret;

	return ret;
}
/*
 * For lack of a better implementation, implement vmsplice() to userspace
 * as a simple copy of the pipe's pages to the user iov.
 */
static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct splice_desc sd;
	ssize_t size;
	int error;
	long ret;

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	error = ret = 0;
	while (nr_segs) {
		void __user *base;
		size_t len;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		if (unlikely(!base)) {
			error = -EFAULT;
			break;
		}

		sd.len = 0;
		sd.total_len = len;
		sd.flags = flags;
		sd.u.userptr = base;
		sd.pos = 0;

		size = __splice_from_pipe(pipe, &sd, pipe_to_user);
		if (size < 0) {
			if (!ret)
				ret = size;

			break;
		}

		ret += size;

		if (size < len)
			break;

		nr_segs--;
		iov++;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (!ret)
		ret = error;

	return ret;
}
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 */
static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.flags		= flags,
		.ops		= &user_page_pipe_buf_ops,
	};

	pipe = pipe_info(file->f_path.dentry->d_inode);
	if (!pipe)
		return -EBADF;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
					    flags & SPLICE_F_GIFT);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}
/*
 * Note that vmsplice only really supports true splicing _from_ user memory
 * to a pipe, not the other way around. Splicing from user memory is a simple
 * operation that can be supported without any funky alignment restrictions
 * or nasty vm tricks. We simply map in the user memory and fill it into
 * a pipe. The reverse isn't quite as easy, though. There are two possible
 * solutions for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  imposes restrictions on both ends of the pipe).
 *
 * Currently we punt and implement it as a normal copy, see pipe_to_user().
 */
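
/*
 * A hypothetical userspace sketch of the direction that is supported well:
 * handing user pages to a pipe. After a SPLICE_F_GIFT vmsplice the caller
 * must not touch "buf" again, since the kernel may steal the pages; the
 * page-aligned buffer and its length are illustrative assumptions.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/uio.h>
 *
 *	static ssize_t give_buffer_to_pipe(int pipe_fd, void *buf, size_t len)
 *	{
 *		struct iovec iov = {
 *			.iov_base	= buf,
 *			.iov_len	= len,
 *		};
 *
 *		return vmsplice(pipe_fd, &iov, 1, SPLICE_F_GIFT);
 *	}
 */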
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = vmsplice_to_pipe(file, iov, nr_segs, flags);
		else if (file->f_mode & FMODE_READ)
			error = vmsplice_to_user(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
/*
 * Make sure there's data to read. Wait for input if we can, otherwise
 * return an appropriate error.
 */
static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (!pipe->nrbufs) {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}
		pipe_wait(pipe);
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Make sure there's writeable room. Wait for room if we can, otherwise
 * return an appropriate error.
 */
static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
{
	int ret;

	/*
	 * Check ->nrbufs without the inode lock first. This function
	 * is speculative anyway, so missing one is ok.
	 */
	if (pipe->nrbufs < PIPE_BUFFERS)
		return 0;

	ret = 0;
	mutex_lock(&pipe->inode->i_mutex);

	while (pipe->nrbufs >= PIPE_BUFFERS) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			ret = -EPIPE;
			break;
		}
		if (flags & SPLICE_F_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	mutex_unlock(&pipe->inode->i_mutex);
	return ret;
}
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret = 0, i = 0, nbuf;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	inode_double_lock(ipipe->inode, opipe->inode);

	do {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		/*
		 * If we have iterated all input buffers or ran out of
		 * output room, break.
		 */
		if (i >= ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS)
			break;

		ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));
		nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

		/*
		 * Get a reference to this pipe buffer,
		 * so we can copy the contents over.
		 */
		ibuf->ops->get(ipipe, ibuf);

		obuf = opipe->bufs + nbuf;
		*obuf = *ibuf;

		/*
		 * Don't inherit the gift flag, we need to
		 * prevent multiple steals of this page.
		 */
		obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

		if (obuf->len > len)
			obuf->len = len;

		opipe->nrbufs++;
		ret += obuf->len;
		len -= obuf->len;
		i++;
	} while (len);

	inode_double_unlock(ipipe->inode, opipe->inode);

	/*
	 * If we put data in the output pipe, wakeup any potential readers.
	 */
	if (ret > 0) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode);
	struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode);
	int ret = -EINVAL;

	/*
	 * Duplicate the contents of ipipe to opipe without actually
	 * copying the data.
	 */
	if (ipipe && opipe && ipipe != opipe) {
		/*
		 * Keep going, unless we encounter an error. The ipipe/opipe
		 * ordering doesn't really matter.
		 */
		ret = link_ipipe_prep(ipipe, flags);
		if (!ret) {
			ret = link_opipe_prep(opipe, flags);
			if (!ret) {
				ret = link_pipe(ipipe, opipe, len, flags);
				if (!ret && (flags & SPLICE_F_NONBLOCK))
					ret = -EAGAIN;
			}
		}
	}

	return ret;
}
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}
		fput_light(in, fput_in);
	}

	return error;
}