/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
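/*
 * A minimal userspace sketch of the "pipe as kernel buffer" idea
 * (assuming a libc wrapper for the new syscall; the fds are hypothetical
 * and error handling is omitted):
 *
 *	int pfd[2];
 *
 *	pipe(pfd);
 *	splice(file_fd, NULL, pfd[1], NULL, 4096, 0);
 *	read(pfd[0], buf, 4096);
 *
 * The file data is pulled into the pipe without a userspace copy; the
 * read() then drains the pipe like any other.
 */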
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>
struct partial_page {
	unsigned int offset;
	unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
	struct page **pages;		/* page map */
	struct partial_page *partial;	/* pages[] may not be contig */
	int nr_pages;			/* number of pages in map */
	unsigned int flags;		/* splice flags */
	struct pipe_buf_operations *ops;/* ops associated with output pipe */
};
/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
	 * At least for ext2 with nobh option, we need to wait on writeback
	 * completing on this page, since we'll remove it from the pagecache.
	 * Otherwise truncate won't wait on the page, allowing the disk
	 * blocks to be reused by someone else before we actually wrote our
	 * data to them. fs corruption ensues.
	 */
	wait_on_page_writeback(page);

	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
					struct pipe_buffer *buf)
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
}
static void *page_cache_pipe_buf_map(struct file *file,
				     struct pipe_inode_info *info,
				     struct pipe_buffer *buf)
{
	struct page *page = buf->page;
	int err;

	if (!PageUptodate(page)) {
		lock_page(page);

		/*
		 * Page got truncated/unhashed. This will cause a 0-byte
		 * splice, if this is the first page.
		 */
		if (!page->mapping) {
			err = -ENODATA;
			goto error;
		}

		/*
		 * Uh oh, read-error from disk.
		 */
		if (!PageUptodate(page)) {
			err = -EIO;
			goto error;
		}

		/*
		 * Page is ok after all, fall through to mapping.
		 */
		unlock_page(page);
	}

	return kmap(page);
error:
	unlock_page(page);
	return ERR_PTR(err);
}
static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
				      struct pipe_buffer *buf)
{
	kunmap(buf->page);
}
static void *user_page_pipe_buf_map(struct file *file,
				    struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return kmap(buf->page);
}

static void user_page_pipe_buf_unmap(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	kunmap(buf->page);
}

static void page_cache_pipe_buf_get(struct pipe_inode_info *info,
				    struct pipe_buffer *buf)
{
	page_cache_get(buf->page);
}
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	return 1;
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
	.can_merge = 0,
	.map = user_page_pipe_buf_map,
	.unmap = user_page_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = user_page_pipe_buf_steal,
	.get = page_cache_pipe_buf_get,
};
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
			      struct splice_pipe_desc *spd)
{
	int ret, do_wakeup, page_nr;

	ret = 0;
	do_wakeup = 0;
	page_nr = 0;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (!pipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		if (pipe->nrbufs < PIPE_BUFFERS) {
			int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = pipe->bufs + newbuf;

			buf->page = spd->pages[page_nr];
			buf->offset = spd->partial[page_nr].offset;
			buf->len = spd->partial[page_nr].len;
			buf->ops = spd->ops;
			pipe->nrbufs++;
			page_nr++;
			ret += buf->len;

			if (pipe->inode)
				do_wakeup = 1;

			if (!--spd->nr_pages)
				break;
			if (pipe->nrbufs < PIPE_BUFFERS)
				continue;

			break;
		}

		if (spd->flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
			do_wakeup = 0;
		}

		pipe->waiting_writers++;
		pipe_wait(pipe);
		pipe->waiting_writers--;
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

	while (page_nr < spd->nr_pages)
		page_cache_release(spd->pages[page_nr++]);

	return ret;
}
static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
			   struct pipe_inode_info *pipe, size_t len,
			   unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	unsigned int loff, nr_pages;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize;
	size_t total_len;
	int error;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
	};

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	if (nr_pages > PIPE_BUFFERS)
		nr_pages = PIPE_BUFFERS;

	/*
	 * Initiate read-ahead on this page range. However, don't call into
	 * read-ahead if this is a non-zero offset (we are likely doing small
	 * chunk splice and the page is already there) for a single page.
	 * Note that spd.nr_pages is still zero at this point, so the count
	 * we just computed in nr_pages is the one to use here.
	 */
	if (!loff || nr_pages > 1)
		do_page_cache_readahead(mapping, in, index, nr_pages);

	/*
	 * Now fill in the holes:
	 */
	error = 0;
	total_len = 0;
	for (spd.nr_pages = 0; spd.nr_pages < nr_pages; spd.nr_pages++, index++) {
		unsigned int this_len;

		if (!len)
			break;

		/*
		 * this_len is the max we'll use from this page
		 */
		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
find_page:
		/*
		 * lookup the page for this index
		 */
		page = find_get_page(mapping, index);
		if (!page) {
			/*
			 * page didn't exist, allocate one
			 */
			page = page_cache_alloc_cold(mapping);
			if (!page)
				break;

			error = add_to_page_cache_lru(page, mapping, index,
						      mapping_gfp_mask(mapping));
			if (unlikely(error)) {
				page_cache_release(page);
				break;
			}

			goto readpage;
		}

		/*
		 * If the page isn't uptodate, we may need to start io on it
		 */
		if (!PageUptodate(page)) {
			/*
			 * If in nonblock mode then don't block on waiting
			 * for an in-flight io page
			 */
			if (flags & SPLICE_F_NONBLOCK)
				break;

			lock_page(page);

			/*
			 * page was truncated, stop here. if this isn't the
			 * first page, we'll just complete what we already
			 * added
			 */
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				break;
			}
			/*
			 * page was already under io and is now done, great
			 */
			if (PageUptodate(page)) {
				unlock_page(page);
				goto fill_it;
			}

readpage:
			/*
			 * need to read in the page
			 */
			error = mapping->a_ops->readpage(in, page);

			if (unlikely(error)) {
				page_cache_release(page);
				if (error == AOP_TRUNCATED_PAGE)
					goto find_page;
				break;
			}

			/*
			 * i_size must be checked after ->readpage().
			 */
			isize = i_size_read(mapping->host);
			end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
			if (unlikely(!isize || index > end_index)) {
				page_cache_release(page);
				break;
			}

			/*
			 * if this is the last page, see if we need to shrink
			 * the length and stop
			 */
			if (end_index == index) {
				loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
				if (total_len + loff > isize) {
					page_cache_release(page);
					break;
				}
				/*
				 * force quit after adding this page
				 */
				nr_pages = spd.nr_pages;
				this_len = min(this_len, loff);
				loff = 0;
			}
		}
fill_it:
		pages[spd.nr_pages] = page;
		partial[spd.nr_pages].offset = loff;
		partial[spd.nr_pages].len = this_len;
		len -= this_len;
		total_len += this_len;
		loff = 0;
	}

	if (spd.nr_pages)
		return splice_to_pipe(pipe, &spd);

	return error;
}
/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
				 struct pipe_inode_info *pipe, size_t len,
				 unsigned int flags)
{
	ssize_t spliced;
	int ret;

	ret = 0;
	spliced = 0;

	while (len) {
		ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
		}

		*ppos += ret;
		len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);
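/*
 * A filesystem that wants splice support on regular files can simply
 * point its file_operations at the generic helpers, alongside its usual
 * read/write methods. A sketch (the examplefs name is hypothetical):
 *
 *	static struct file_operations examplefs_file_operations = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */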
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
			    struct pipe_buffer *buf, struct splice_desc *sd)
{
	struct file *file = sd->file;
	loff_t pos = sd->pos;
	ssize_t ret;
	void *ptr;
	int more;

	/*
	 * Sub-optimal, but we are limited by the pipe ->map. We don't
	 * need a kmap'ed buffer here, we just want to make sure we
	 * have the page pinned if the pipe page originates from the
	 * page cache.
	 */
	ptr = buf->ops->map(file, info, buf);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

	ret = file->f_op->sendpage(file, buf->page, buf->offset, sd->len,
				   &pos, more);

	buf->ops->unmap(info, buf);
	return ret;
}
/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	struct file *file = sd->file;
	struct address_space *mapping = file->f_mapping;
	gfp_t gfp_mask = mapping_gfp_mask(mapping);
	unsigned int offset, this_len;
	struct page *page;
	pgoff_t index;
	char *src;
	int ret;

	/*
	 * make sure the data in this buffer is uptodate
	 */
	src = buf->ops->map(file, info, buf);
	if (IS_ERR(src))
		return PTR_ERR(src);

	index = sd->pos >> PAGE_CACHE_SHIFT;
	offset = sd->pos & ~PAGE_CACHE_MASK;

	this_len = sd->len;
	if (this_len + offset > PAGE_CACHE_SIZE)
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it. The page
		 * will also be locked on successful return.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_nomem;

			/*
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (this_len < PAGE_CACHE_SIZE) {
				ret = mapping->a_ops->readpage(file, page);
				if (unlikely(ret))
					goto out;

				lock_page(page);

				if (!PageUptodate(page)) {
					/*
					 * Page got invalidated, repeat.
					 */
					if (!page->mapping) {
						unlock_page(page);
						page_cache_release(page);
						goto find_page;
					}
					ret = -EIO;
					goto out;
				}
			} else
				SetPageUptodate(page);
		}
	}

	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
		flush_dcache_page(page);
		kunmap_atomic(dst, KM_USER0);
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	/*
	 * Return the number of bytes written.
	 */
	ret = this_len;
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
		page_cache_release(page);

	unlock_page(page);
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;
}
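/*
 * From the caller's side, the move optimization above is requested with
 * SPLICE_F_MOVE. A userspace sketch (hypothetical fds, error handling
 * omitted):
 *
 *	splice(in_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	splice(pfd[0], NULL, out_fd, NULL, 65536, SPLICE_F_MOVE);
 *
 * If the pipe pages are unreferenced elsewhere, they can migrate
 * straight into out_fd's page cache; otherwise we fall back to the
 * copy path above.
 */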
/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
			 loff_t *ppos, size_t len, unsigned int flags,
			 splice_actor *actor)
{
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = *ppos;

	if (pipe->inode)
		mutex_lock(&pipe->inode->i_mutex);

	for (;;) {
		if (pipe->nrbufs) {
			struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(pipe, buf, &sd);
			if (err <= 0) {
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += err;
			buf->offset += err;
			buf->len -= err;

			sd.len -= err;
			sd.pos += err;
			sd.total_len -= err;
			if (sd.len)
				continue;

			if (!buf->len) {
				buf->ops = NULL;
				ops->release(pipe, buf);
				pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
				pipe->nrbufs--;
				if (pipe->inode)
					do_wakeup = 1;
			}

			if (!sd.total_len)
				break;
		}

		if (pipe->nrbufs)
			continue;
		if (!pipe->writers)
			break;
		if (!pipe->waiting_writers) {
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (do_wakeup) {
			smp_mb();
			if (waitqueue_active(&pipe->wait))
				wake_up_interruptible_sync(&pipe->wait);
			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(pipe);
	}

	if (pipe->inode)
		mutex_unlock(&pipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
	}

	return ret;
}
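/*
 * A minimal sketch of a custom actor (hypothetical, for illustration
 * only). An actor is handed one pipe buffer at a time plus the
 * splice_desc state, and returns the number of bytes it consumed or a
 * negative error:
 *
 *	static int pipe_to_null(struct pipe_inode_info *pipe,
 *				struct pipe_buffer *buf,
 *				struct splice_desc *sd)
 *	{
 *		return sd->len;
 *	}
 *
 * Passing pipe_to_null to splice_from_pipe() would drain the pipe while
 * discarding the data, much like a write to /dev/null.
 */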
/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
			  loff_t *ppos, size_t len, unsigned int flags)
{
	struct address_space *mapping = out->f_mapping;
	ssize_t ret;

	ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
	if (ret > 0) {
		struct inode *inode = mapping->host;

		*ppos += ret;

		/*
		 * If file or inode is SYNC and we actually wrote some data,
		 * sync it.
		 */
		if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
			int err;

			mutex_lock(&inode->i_mutex);
			err = generic_osync_inode(inode, mapping,
						  OSYNC_METADATA|OSYNC_DATA);
			mutex_unlock(&inode->i_mutex);

			if (err)
				ret = err;
		}
	}

	return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
				loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
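/*
 * The classic use of the sendpage path is zero-copy file-to-socket
 * transfer through a pipe. A userspace sketch (assuming a libc wrapper
 * for the syscall; error handling omitted):
 *
 *	int pfd[2];
 *	ssize_t n;
 *
 *	pipe(pfd);
 *	while ((n = splice(file_fd, NULL, pfd[1], NULL, 65536, 0)) > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MORE);
 */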
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
			   loff_t *ppos, size_t len, unsigned int flags)
{
	int ret;

	if (unlikely(!out->f_op || !out->f_op->splice_write))
		return -EINVAL;

	if (unlikely(!(out->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = rw_verify_area(WRITE, out, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
}
/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
			 struct pipe_inode_info *pipe, size_t len,
			 unsigned int flags)
{
	loff_t isize, left;
	int ret;

	if (unlikely(!in->f_op || !in->f_op->splice_read))
		return -EINVAL;

	if (unlikely(!(in->f_mode & FMODE_READ)))
		return -EBADF;

	ret = rw_verify_area(READ, in, ppos, len);
	if (unlikely(ret < 0))
		return ret;

	isize = i_size_read(in->f_mapping->host);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
}
long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	long ret, bytes;
	loff_t out_off;
	umode_t i_mode;
	int i;

	/*
	 * We require the input being a regular file, as we don't want to
	 * randomly drop data for eg socket -> socket splicing. Use the
	 * piped splicing for that!
	 */
	i_mode = in->f_dentry->d_inode->i_mode;
	if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
		return -EINVAL;

	/*
	 * neither in nor out is a pipe, setup an internal pipe attached to
	 * 'out' and transfer the wanted data from 'in' to 'out' through that
	 */
	pipe = current->splice_pipe;
	if (unlikely(!pipe)) {
		pipe = alloc_pipe_info(NULL);
		if (!pipe)
			return -ENOMEM;

		/*
		 * We don't have an immediate reader, but we'll read the stuff
		 * out of the pipe right after the splice_to_pipe(). So set
		 * PIPE_READERS appropriately.
		 */
		pipe->readers = 1;

		current->splice_pipe = pipe;
	}

	/*
	 * Do the splice.
	 */
	ret = 0;
	bytes = 0;
	out_off = 0;

	while (len) {
		size_t read_len, max_read_len;

		/*
		 * Do at most PIPE_BUFFERS pages worth of transfer:
		 */
		max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

		ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
		if (unlikely(ret < 0))
			goto out_release;

		read_len = ret;

		/*
		 * NOTE: nonblocking mode only applies to the input. We
		 * must not do the output in nonblocking mode as then we
		 * could get stuck data in the internal pipe:
		 */
		ret = do_splice_from(pipe, out, &out_off, read_len,
				     flags & ~SPLICE_F_NONBLOCK);
		if (unlikely(ret < 0))
			goto out_release;

		bytes += ret;
		len -= ret;

		/*
		 * In nonblocking mode, if we got back a short read then
		 * that was due to either an IO error or due to the
		 * pagecache entry not being there. In the IO error case
		 * the _next_ splice attempt will produce a clean IO error
		 * return value (not a short read), so in both cases it's
		 * correct to break out of the loop here:
		 */
		if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
			break;
	}

	pipe->nrbufs = pipe->curbuf = 0;

	return bytes;

out_release:
	/*
	 * If we did an incomplete transfer we must release
	 * the pipe buffers in question:
	 */
	for (i = 0; i < PIPE_BUFFERS; i++) {
		struct pipe_buffer *buf = pipe->bufs + i;

		if (buf->ops) {
			buf->ops->release(pipe, buf);
			buf->ops = NULL;
		}
	}
	pipe->nrbufs = pipe->curbuf = 0;

	/*
	 * If we transferred some data, return the number of bytes:
	 */
	if (bytes > 0)
		return bytes;

	return ret;
}

EXPORT_SYMBOL(do_splice_direct);
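/*
 * A sketch of a sendfile-style in-kernel caller (hypothetical):
 *
 *	loff_t pos = 0;
 *	long n = do_splice_direct(in_file, &pos, out_file, count, 0);
 *
 * Note that the internal pipe is cached in current->splice_pipe, so
 * repeated calls from the same task reuse it instead of reallocating.
 */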
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
		      struct file *out, loff_t __user *off_out,
		      size_t len, unsigned int flags)
{
	struct pipe_inode_info *pipe;
	loff_t offset, *off;
	long ret;

	pipe = in->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_in)
			return -ESPIPE;
		if (off_out) {
			if (out->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &out->f_pos;

		ret = do_splice_from(pipe, out, off, len, flags);

		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	pipe = out->f_dentry->d_inode->i_pipe;
	if (pipe) {
		if (off_out)
			return -ESPIPE;
		if (off_in) {
			if (in->f_op->llseek == no_llseek)
				return -EINVAL;
			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
				return -EFAULT;
			off = &offset;
		} else
			off = &in->f_pos;

		ret = do_splice_to(in, off, pipe, len, flags);

		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
			ret = -EFAULT;

		return ret;
	}

	return -EINVAL;
}
/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
				unsigned int nr_vecs, struct page **pages,
				struct partial_page *partial)
{
	int buffers = 0, error = 0;

	/*
	 * It's ok to take the mmap_sem for reading, even
	 * across a "get_user()".
	 */
	down_read(&current->mm->mmap_sem);

	while (nr_vecs) {
		unsigned long off, npages;
		void __user *base;
		size_t len;
		int i;

		/*
		 * Get user address base and length for this iovec.
		 */
		error = get_user(base, &iov->iov_base);
		if (unlikely(error))
			break;
		error = get_user(len, &iov->iov_len);
		if (unlikely(error))
			break;

		/*
		 * Sanity check this iovec. 0 read succeeds.
		 */
		if (unlikely(!len))
			break;
		error = -EFAULT;
		if (unlikely(!base))
			break;

		/*
		 * Get this base offset and number of pages, then map
		 * in the user pages.
		 */
		off = (unsigned long) base & ~PAGE_MASK;
		npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages > PIPE_BUFFERS - buffers)
			npages = PIPE_BUFFERS - buffers;

		error = get_user_pages(current, current->mm,
				       (unsigned long) base, npages, 0, 0,
				       &pages[buffers], NULL);

		if (unlikely(error <= 0))
			break;

		/*
		 * Fill this contiguous range into the partial page map.
		 */
		for (i = 0; i < error; i++) {
			const int plen = min_t(size_t, len, PAGE_SIZE) - off;

			partial[buffers].offset = off;
			partial[buffers].len = plen;

			off = 0;
			len -= plen;
			buffers++;
		}

		/*
		 * We didn't complete this iov, stop here since it probably
		 * means we have to move some of this into a pipe to
		 * be able to continue.
		 */
		if (len)
			break;

		/*
		 * Don't continue if we mapped fewer pages than we asked for,
		 * or if we mapped the max number of pages that we have
		 * room for.
		 */
		if (error < npages || buffers == PIPE_BUFFERS)
			break;

		nr_vecs--;
		iov++;
	}

	up_read(&current->mm->mmap_sem);

	if (buffers)
		return buffers;

	return error;
}
/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user memory and fill it into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible, and
 *	  that impose restrictions on both ends of the pipe.
 *
 * Alas, it isn't here.
 *
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
			unsigned long nr_segs, unsigned int flags)
{
	struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
	struct page *pages[PIPE_BUFFERS];
	struct partial_page partial[PIPE_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &user_page_pipe_buf_ops,
	};

	if (unlikely(!pipe))
		return -EBADF;
	if (unlikely(nr_segs > UIO_MAXIOV))
		return -EINVAL;
	else if (unlikely(!nr_segs))
		return 0;

	spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial);
	if (spd.nr_pages <= 0)
		return spd.nr_pages;

	return splice_to_pipe(pipe, &spd);
}
asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
			     unsigned long nr_segs, unsigned int flags)
{
	struct file *file;
	long error;
	int fput;

	error = -EBADF;
	file = fget_light(fd, &fput);
	if (file) {
		if (file->f_mode & FMODE_WRITE)
			error = do_vmsplice(file, iov, nr_segs, flags);

		fput_light(file, fput);
	}

	return error;
}
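/*
 * Userspace sketch of vmsplice (assuming a libc wrapper; buffers and
 * lengths are hypothetical, error handling omitted): map two user
 * buffers into a pipe with a single call.
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf1, .iov_len = len1 },
 *		{ .iov_base = buf2, .iov_len = len2 },
 *	};
 *
 *	ssize_t n = vmsplice(pfd[1], iov, 2, 0);
 */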
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
			   int fd_out, loff_t __user *off_out,
			   size_t len, unsigned int flags)
{
	long error;
	struct file *in, *out;
	int fput_in, fput_out;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fd_in, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			out = fget_light(fd_out, &fput_out);
			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_splice(in, off_in,
							  out, off_out,
							  len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
		     struct pipe_inode_info *opipe,
		     size_t len, unsigned int flags)
{
	struct pipe_buffer *ibuf, *obuf;
	int ret, do_wakeup, i, ipipe_first;

	ret = do_wakeup = ipipe_first = 0;

	/*
	 * Potential ABBA deadlock, work around it by ordering lock
	 * grabbing by inode address. Otherwise two different processes
	 * could deadlock (one doing tee from A -> B, the other from B -> A).
	 */
	if (ipipe->inode < opipe->inode) {
		ipipe_first = 1;
		mutex_lock(&ipipe->inode->i_mutex);
		mutex_lock(&opipe->inode->i_mutex);
	} else {
		mutex_lock(&opipe->inode->i_mutex);
		mutex_lock(&ipipe->inode->i_mutex);
	}

	for (i = 0;; i++) {
		if (!opipe->readers) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}
		if (ipipe->nrbufs - i) {
			ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

			/*
			 * If we have room, fill this buffer
			 */
			if (opipe->nrbufs < PIPE_BUFFERS) {
				int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

				/*
				 * Get a reference to this pipe buffer,
				 * so we can copy the contents over.
				 */
				ibuf->ops->get(ipipe, ibuf);

				obuf = opipe->bufs + nbuf;
				*obuf = *ibuf;

				if (obuf->len > len)
					obuf->len = len;

				opipe->nrbufs++;
				do_wakeup = 1;
				ret += obuf->len;
				len -= obuf->len;

				if (!len)
					break;
				if (opipe->nrbufs < PIPE_BUFFERS)
					continue;
			}

			/*
			 * We have input available, but no output room.
			 * If we already copied data, return that. If we
			 * need to drop the opipe lock, it must be ordered
			 * last to avoid deadlocks.
			 */
			if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
				if (!ret)
					ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				if (!ret)
					ret = -ERESTARTSYS;
				break;
			}
			if (do_wakeup) {
				smp_mb();
				if (waitqueue_active(&opipe->wait))
					wake_up_interruptible(&opipe->wait);
				kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
				do_wakeup = 0;
			}

			opipe->waiting_writers++;
			pipe_wait(opipe);
			opipe->waiting_writers--;
			continue;
		}

		/*
		 * No input buffers, do the usual checks for available
		 * writers and blocking and wait if necessary
		 */
		if (!ipipe->writers)
			break;
		if (!ipipe->waiting_writers) {
			if (ret)
				break;
		}
		/*
		 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
		 * with another process, we can only safely do that if
		 * the ipipe lock is ordered last.
		 */
		if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		if (waitqueue_active(&ipipe->wait))
			wake_up_interruptible_sync(&ipipe->wait);
		kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

		pipe_wait(ipipe);
	}

	mutex_unlock(&ipipe->inode->i_mutex);
	mutex_unlock(&opipe->inode->i_mutex);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&opipe->wait))
			wake_up_interruptible(&opipe->wait);
		kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
	}

	return ret;
}
/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
		   unsigned int flags)
{
	struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
	struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

	/*
	 * Duplicate the contents of ipipe to opipe, without consuming
	 * the input.
	 */
	if (ipipe && opipe)
		return link_pipe(ipipe, opipe, len, flags);

	return -EINVAL;
}
asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
	struct file *in;
	int error, fput_in;

	if (unlikely(!len))
		return 0;

	error = -EBADF;
	in = fget_light(fdin, &fput_in);
	if (in) {
		if (in->f_mode & FMODE_READ) {
			int fput_out;
			struct file *out = fget_light(fdout, &fput_out);

			if (out) {
				if (out->f_mode & FMODE_WRITE)
					error = do_tee(in, out, len, flags);
				fput_light(out, fput_out);
			}
		}

		fput_light(in, fput_in);
	}

	return error;
}
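/*
 * Userspace sketch of tee (assuming a libc wrapper; the fds are
 * hypothetical, error handling omitted): duplicate pipe contents to a
 * second pipe without consuming them, then forward the input as usual.
 *
 *	ssize_t n = tee(pfd_in[0], pfd_log[1], 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(pfd_in[0], NULL, out_fd, NULL, n, 0);
 */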