/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files and fixing the initial implementation
 * bugs.
 *
 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
 *
 */
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
31 * Passed to the actors
34 unsigned int len
, total_len
; /* current and remaining length */
35 unsigned int flags
; /* splice flags */
36 struct file
*file
; /* file to read/write */
37 loff_t pos
; /* file position */
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info
*info
,
47 struct pipe_buffer
*buf
)
49 struct page
*page
= buf
->page
;
50 struct address_space
*mapping
= page_mapping(page
);
52 WARN_ON(!PageLocked(page
));
53 WARN_ON(!PageUptodate(page
));
56 * At least for ext2 with nobh option, we need to wait on writeback
57 * completing on this page, since we'll remove it from the pagecache.
58 * Otherwise truncate wont wait on the page, allowing the disk
59 * blocks to be reused by someone else before we actually wrote our
60 * data to them. fs corruption ensues.
62 wait_on_page_writeback(page
);
64 if (PagePrivate(page
))
65 try_to_release_page(page
, mapping_gfp_mask(mapping
));
67 if (!remove_mapping(mapping
, page
))
70 buf
->flags
|= PIPE_BUF_FLAG_STOLEN
| PIPE_BUF_FLAG_LRU
;
74 static void page_cache_pipe_buf_release(struct pipe_inode_info
*info
,
75 struct pipe_buffer
*buf
)
77 page_cache_release(buf
->page
);
79 buf
->flags
&= ~(PIPE_BUF_FLAG_STOLEN
| PIPE_BUF_FLAG_LRU
);
82 static void *page_cache_pipe_buf_map(struct file
*file
,
83 struct pipe_inode_info
*info
,
84 struct pipe_buffer
*buf
)
86 struct page
*page
= buf
->page
;
90 if (!PageUptodate(page
)) {
97 return ERR_PTR(-ENODATA
);
100 return kmap(buf
->page
);
103 static void page_cache_pipe_buf_unmap(struct pipe_inode_info
*info
,
104 struct pipe_buffer
*buf
)
106 unlock_page(buf
->page
);
110 static struct pipe_buf_operations page_cache_pipe_buf_ops
= {
112 .map
= page_cache_pipe_buf_map
,
113 .unmap
= page_cache_pipe_buf_unmap
,
114 .release
= page_cache_pipe_buf_release
,
115 .steal
= page_cache_pipe_buf_steal
,
119 * Pipe output worker. This sets up our pipe format with the page cache
120 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
122 static ssize_t
move_to_pipe(struct inode
*inode
, struct page
**pages
,
123 int nr_pages
, unsigned long offset
,
124 unsigned long len
, unsigned int flags
)
126 struct pipe_inode_info
*info
;
127 int ret
, do_wakeup
, i
;
133 mutex_lock(PIPE_MUTEX(*inode
));
135 info
= inode
->i_pipe
;
139 if (!PIPE_READERS(*inode
)) {
140 send_sig(SIGPIPE
, current
, 0);
147 if (bufs
< PIPE_BUFFERS
) {
148 int newbuf
= (info
->curbuf
+ bufs
) & (PIPE_BUFFERS
- 1);
149 struct pipe_buffer
*buf
= info
->bufs
+ newbuf
;
150 struct page
*page
= pages
[i
++];
151 unsigned long this_len
;
153 this_len
= PAGE_CACHE_SIZE
- offset
;
158 buf
->offset
= offset
;
160 buf
->ops
= &page_cache_pipe_buf_ops
;
161 info
->nrbufs
= ++bufs
;
171 if (bufs
< PIPE_BUFFERS
)
177 if (flags
& SPLICE_F_NONBLOCK
) {
183 if (signal_pending(current
)) {
191 if (waitqueue_active(PIPE_WAIT(*inode
)))
192 wake_up_interruptible_sync(PIPE_WAIT(*inode
));
193 kill_fasync(PIPE_FASYNC_READERS(*inode
), SIGIO
,
198 PIPE_WAITING_WRITERS(*inode
)++;
200 PIPE_WAITING_WRITERS(*inode
)--;
203 mutex_unlock(PIPE_MUTEX(*inode
));
207 if (waitqueue_active(PIPE_WAIT(*inode
)))
208 wake_up_interruptible(PIPE_WAIT(*inode
));
209 kill_fasync(PIPE_FASYNC_READERS(*inode
), SIGIO
, POLL_IN
);
213 page_cache_release(pages
[i
++]);
218 static int __generic_file_splice_read(struct file
*in
, struct inode
*pipe
,
219 size_t len
, unsigned int flags
)
221 struct address_space
*mapping
= in
->f_mapping
;
222 unsigned int offset
, nr_pages
;
223 struct page
*pages
[PIPE_BUFFERS
], *shadow
[PIPE_BUFFERS
];
228 index
= in
->f_pos
>> PAGE_CACHE_SHIFT
;
229 offset
= in
->f_pos
& ~PAGE_CACHE_MASK
;
230 nr_pages
= (len
+ offset
+ PAGE_CACHE_SIZE
- 1) >> PAGE_CACHE_SHIFT
;
232 if (nr_pages
> PIPE_BUFFERS
)
233 nr_pages
= PIPE_BUFFERS
;
236 * initiate read-ahead on this page range
238 do_page_cache_readahead(mapping
, in
, index
, nr_pages
);
241 * Get as many pages from the page cache as possible..
242 * Start IO on the page cache entries we create (we
243 * can assume that any pre-existing ones we find have
244 * already had IO started on them).
246 i
= find_get_pages(mapping
, index
, nr_pages
, pages
);
249 * common case - we found all pages and they are contiguous,
252 if (i
&& (pages
[i
- 1]->index
== index
+ i
- 1))
256 * fill shadow[] with pages at the right locations, so we only
259 memset(shadow
, 0, nr_pages
* sizeof(struct page
*));
260 for (j
= 0; j
< i
; j
++)
261 shadow
[pages
[j
]->index
- index
] = pages
[j
];
264 * now fill in the holes
266 for (i
= 0, pidx
= index
; i
< nr_pages
; pidx
++, i
++) {
273 * no page there, look one up / create it
275 page
= find_or_create_page(mapping
, pidx
,
276 mapping_gfp_mask(mapping
));
280 if (PageUptodate(page
))
283 error
= mapping
->a_ops
->readpage(in
, page
);
285 if (unlikely(error
)) {
286 page_cache_release(page
);
294 for (i
= 0; i
< nr_pages
; i
++) {
296 page_cache_release(shadow
[i
]);
301 memcpy(pages
, shadow
, i
* sizeof(struct page
*));
304 * Now we splice them into the pipe..
307 return move_to_pipe(pipe
, pages
, i
, offset
, len
, flags
);
311 * generic_file_splice_read - splice data from file to a pipe
312 * @in: file to splice from
313 * @pipe: pipe to splice to
314 * @len: number of bytes to splice
315 * @flags: splice modifier flags
317 * Will read pages from given file and fill them into a pipe.
320 ssize_t
generic_file_splice_read(struct file
*in
, struct inode
*pipe
,
321 size_t len
, unsigned int flags
)
329 ret
= __generic_file_splice_read(in
, pipe
, len
, flags
);
338 if (!(flags
& SPLICE_F_NONBLOCK
))
350 EXPORT_SYMBOL(generic_file_splice_read
);
353 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
356 static int pipe_to_sendpage(struct pipe_inode_info
*info
,
357 struct pipe_buffer
*buf
, struct splice_desc
*sd
)
359 struct file
*file
= sd
->file
;
360 loff_t pos
= sd
->pos
;
367 * sub-optimal, but we are limited by the pipe ->map. we don't
368 * need a kmap'ed buffer here, we just want to make sure we
369 * have the page pinned if the pipe page originates from the
372 ptr
= buf
->ops
->map(file
, info
, buf
);
376 offset
= pos
& ~PAGE_CACHE_MASK
;
377 more
= (sd
->flags
& SPLICE_F_MORE
) || sd
->len
< sd
->total_len
;
379 ret
= file
->f_op
->sendpage(file
, buf
->page
, offset
, sd
->len
, &pos
,more
);
381 buf
->ops
->unmap(info
, buf
);
389 * This is a little more tricky than the file -> pipe splicing. There are
390 * basically three cases:
392 * - Destination page already exists in the address space and there
393 * are users of it. For that case we have no other option that
394 * copying the data. Tough luck.
395 * - Destination page already exists in the address space, but there
396 * are no users of it. Make sure it's uptodate, then drop it. Fall
397 * through to last case.
398 * - Destination page does not exist, we can add the pipe page to
399 * the page cache and avoid the copy.
401 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
402 * sd->flags), we attempt to migrate pages from the pipe to the output
403 * file address space page cache. This is possible if no one else has
404 * the pipe page referenced outside of the pipe and page cache. If
405 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
406 * a new page in the output file page cache and fill/dirty that.
408 static int pipe_to_file(struct pipe_inode_info
*info
, struct pipe_buffer
*buf
,
409 struct splice_desc
*sd
)
411 struct file
*file
= sd
->file
;
412 struct address_space
*mapping
= file
->f_mapping
;
413 gfp_t gfp_mask
= mapping_gfp_mask(mapping
);
421 * after this, page will be locked and unmapped
423 src
= buf
->ops
->map(file
, info
, buf
);
427 index
= sd
->pos
>> PAGE_CACHE_SHIFT
;
428 offset
= sd
->pos
& ~PAGE_CACHE_MASK
;
431 * reuse buf page, if SPLICE_F_MOVE is set
433 if (sd
->flags
& SPLICE_F_MOVE
) {
435 * If steal succeeds, buf->page is now pruned from the vm
436 * side (LRU and page cache) and we can reuse it.
438 if (buf
->ops
->steal(info
, buf
))
442 if (add_to_page_cache(page
, mapping
, index
, gfp_mask
))
445 if (!(buf
->flags
& PIPE_BUF_FLAG_LRU
))
450 page
= find_or_create_page(mapping
, index
, gfp_mask
);
455 * If the page is uptodate, it is also locked. If it isn't
456 * uptodate, we can mark it uptodate if we are filling the
457 * full page. Otherwise we need to read it in first...
459 if (!PageUptodate(page
)) {
460 if (sd
->len
< PAGE_CACHE_SIZE
) {
461 ret
= mapping
->a_ops
->readpage(file
, page
);
467 if (!PageUptodate(page
)) {
469 * page got invalidated, repeat
471 if (!page
->mapping
) {
473 page_cache_release(page
);
480 WARN_ON(!PageLocked(page
));
481 SetPageUptodate(page
);
486 ret
= mapping
->a_ops
->prepare_write(file
, page
, 0, sd
->len
);
487 if (ret
== AOP_TRUNCATED_PAGE
) {
488 page_cache_release(page
);
493 if (!(buf
->flags
& PIPE_BUF_FLAG_STOLEN
)) {
494 char *dst
= kmap_atomic(page
, KM_USER0
);
496 memcpy(dst
+ offset
, src
+ buf
->offset
, sd
->len
);
497 flush_dcache_page(page
);
498 kunmap_atomic(dst
, KM_USER0
);
501 ret
= mapping
->a_ops
->commit_write(file
, page
, 0, sd
->len
);
502 if (ret
== AOP_TRUNCATED_PAGE
) {
503 page_cache_release(page
);
508 mark_page_accessed(page
);
509 balance_dirty_pages_ratelimited(mapping
);
511 if (!(buf
->flags
& PIPE_BUF_FLAG_STOLEN
)) {
512 page_cache_release(page
);
516 buf
->ops
->unmap(info
, buf
);
/* Per-buffer worker invoked by move_from_pipe() for each pipe buffer. */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);
524 * Pipe input worker. Most of this logic works like a regular pipe, the
525 * key here is the 'actor' worker passed in that actually moves the data
526 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
528 static ssize_t
move_from_pipe(struct inode
*inode
, struct file
*out
,
529 size_t len
, unsigned int flags
,
532 struct pipe_inode_info
*info
;
533 int ret
, do_wakeup
, err
;
534 struct splice_desc sd
;
544 mutex_lock(PIPE_MUTEX(*inode
));
546 info
= inode
->i_pipe
;
548 int bufs
= info
->nrbufs
;
551 int curbuf
= info
->curbuf
;
552 struct pipe_buffer
*buf
= info
->bufs
+ curbuf
;
553 struct pipe_buf_operations
*ops
= buf
->ops
;
556 if (sd
.len
> sd
.total_len
)
557 sd
.len
= sd
.total_len
;
559 err
= actor(info
, buf
, &sd
);
561 if (!ret
&& err
!= -ENODATA
)
568 buf
->offset
+= sd
.len
;
572 ops
->release(info
, buf
);
573 curbuf
= (curbuf
+ 1) & (PIPE_BUFFERS
- 1);
574 info
->curbuf
= curbuf
;
575 info
->nrbufs
= --bufs
;
580 sd
.total_len
-= sd
.len
;
587 if (!PIPE_WRITERS(*inode
))
589 if (!PIPE_WAITING_WRITERS(*inode
)) {
594 if (flags
& SPLICE_F_NONBLOCK
) {
600 if (signal_pending(current
)) {
608 if (waitqueue_active(PIPE_WAIT(*inode
)))
609 wake_up_interruptible_sync(PIPE_WAIT(*inode
));
610 kill_fasync(PIPE_FASYNC_WRITERS(*inode
),SIGIO
,POLL_OUT
);
617 mutex_unlock(PIPE_MUTEX(*inode
));
621 if (waitqueue_active(PIPE_WAIT(*inode
)))
622 wake_up_interruptible(PIPE_WAIT(*inode
));
623 kill_fasync(PIPE_FASYNC_WRITERS(*inode
), SIGIO
, POLL_OUT
);
626 mutex_lock(&out
->f_mapping
->host
->i_mutex
);
628 mutex_unlock(&out
->f_mapping
->host
->i_mutex
);
634 * generic_file_splice_write - splice data from a pipe to a file
636 * @out: file to write to
637 * @len: number of bytes to splice
638 * @flags: splice modifier flags
640 * Will either move or copy pages (determined by @flags options) from
641 * the given pipe inode to the given file.
644 ssize_t
generic_file_splice_write(struct inode
*inode
, struct file
*out
,
645 size_t len
, unsigned int flags
)
647 struct address_space
*mapping
= out
->f_mapping
;
648 ssize_t ret
= move_from_pipe(inode
, out
, len
, flags
, pipe_to_file
);
651 * if file or inode is SYNC and we actually wrote some data, sync it
653 if (unlikely((out
->f_flags
& O_SYNC
) || IS_SYNC(mapping
->host
))
655 struct inode
*inode
= mapping
->host
;
658 mutex_lock(&inode
->i_mutex
);
659 err
= generic_osync_inode(mapping
->host
, mapping
,
660 OSYNC_METADATA
|OSYNC_DATA
);
661 mutex_unlock(&inode
->i_mutex
);
670 EXPORT_SYMBOL(generic_file_splice_write
);
673 * generic_splice_sendpage - splice data from a pipe to a socket
675 * @out: socket to write to
676 * @len: number of bytes to splice
677 * @flags: splice modifier flags
679 * Will send @len bytes from the pipe to a network socket. No data copying
683 ssize_t
generic_splice_sendpage(struct inode
*inode
, struct file
*out
,
684 size_t len
, unsigned int flags
)
686 return move_from_pipe(inode
, out
, len
, flags
, pipe_to_sendpage
);
689 EXPORT_SYMBOL(generic_splice_sendpage
);
692 * Attempt to initiate a splice from pipe to file.
694 static long do_splice_from(struct inode
*pipe
, struct file
*out
, size_t len
,
700 if (!out
->f_op
|| !out
->f_op
->splice_write
)
703 if (!(out
->f_mode
& FMODE_WRITE
))
707 ret
= rw_verify_area(WRITE
, out
, &pos
, len
);
708 if (unlikely(ret
< 0))
711 return out
->f_op
->splice_write(pipe
, out
, len
, flags
);
715 * Attempt to initiate a splice from a file to a pipe.
717 static long do_splice_to(struct file
*in
, struct inode
*pipe
, size_t len
,
720 loff_t pos
, isize
, left
;
723 if (!in
->f_op
|| !in
->f_op
->splice_read
)
726 if (!(in
->f_mode
& FMODE_READ
))
730 ret
= rw_verify_area(READ
, in
, &pos
, len
);
731 if (unlikely(ret
< 0))
734 isize
= i_size_read(in
->f_mapping
->host
);
735 if (unlikely(in
->f_pos
>= isize
))
738 left
= isize
- in
->f_pos
;
742 return in
->f_op
->splice_read(in
, pipe
, len
, flags
);
746 * Determine where to splice to/from.
748 static long do_splice(struct file
*in
, struct file
*out
, size_t len
,
753 pipe
= in
->f_dentry
->d_inode
;
755 return do_splice_from(pipe
, out
, len
, flags
);
757 pipe
= out
->f_dentry
->d_inode
;
759 return do_splice_to(in
, pipe
, len
, flags
);
764 asmlinkage
long sys_splice(int fdin
, int fdout
, size_t len
, unsigned int flags
)
767 struct file
*in
, *out
;
768 int fput_in
, fput_out
;
774 in
= fget_light(fdin
, &fput_in
);
776 if (in
->f_mode
& FMODE_READ
) {
777 out
= fget_light(fdout
, &fput_out
);
779 if (out
->f_mode
& FMODE_WRITE
)
780 error
= do_splice(in
, out
, len
, flags
);
781 fput_light(out
, fput_out
);
785 fput_light(in
, fput_in
);