fs/splice.c

/*
 * "splice": joining two ropes together by interweaving their strands.
 *
 * This is the "extended pipe" functionality, where a pipe is used as
 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
 * buffer that you can use to transfer data from one end to the other.
 *
 * The traditional unix read/write is extended with a "splice()" operation
 * that transfers data buffers to or from a pipe buffer.
 *
 * Named by Larry McVoy, original implementation from Linus, extended by
 * Jens to support splicing to files, network, direct splicing, etc and
 * fixing lots of bugs.
 *
 * Copyright (C) 2005-2006 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2005-2006 Linus Torvalds <torvalds@osdl.org>
 * Copyright (C) 2006 Ingo Molnar <mingo@elte.hu>
 *
 */
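/*
 * Minimal userspace sketch of the model described above (illustrative
 * only, not part of the original file; assumes a libc that exposes the
 * splice(2) wrapper): data flows file -> pipe -> socket, with the pipe
 * acting as the in-kernel buffer.
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(file_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MORE);
 */
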
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/pipe_fs_i.h>
#include <linux/mm_inline.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

struct partial_page {
        unsigned int offset;
        unsigned int len;
};

/*
 * Passed to splice_to_pipe
 */
struct splice_pipe_desc {
        struct page **pages;            /* page map */
        struct partial_page *partial;   /* pages[] may not be contig */
        int nr_pages;                   /* number of pages in map */
        unsigned int flags;             /* splice flags */
        struct pipe_buf_operations *ops;/* ops associated with output pipe */
};

/*
 * Attempt to steal a page from a pipe buffer. This should perhaps go into
 * a vm helper function, it's already simplified quite a bit by the
 * addition of remove_mapping(). If success is returned, the caller may
 * attempt to reuse this page for another destination.
 */
static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
                                     struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        struct address_space *mapping = page_mapping(page);

        lock_page(page);

        WARN_ON(!PageUptodate(page));

        /*
         * At least for ext2 with nobh option, we need to wait on writeback
         * completing on this page, since we'll remove it from the pagecache.
         * Otherwise truncate won't wait on the page, allowing the disk
         * blocks to be reused by someone else before we actually wrote our
         * data to them. fs corruption ensues.
         */
        wait_on_page_writeback(page);

        if (PagePrivate(page))
                try_to_release_page(page, mapping_gfp_mask(mapping));

        if (!remove_mapping(mapping, page)) {
                unlock_page(page);
                return 1;
        }

        return 0;
}

static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
                                        struct pipe_buffer *buf)
{
        page_cache_release(buf->page);
}

static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
                                   struct pipe_buffer *buf)
{
        struct page *page = buf->page;
        int err;

        if (!PageUptodate(page)) {
                lock_page(page);

                /*
                 * Page got truncated/unhashed. This will cause a 0-byte
                 * splice, if this is the first page.
                 */
                if (!page->mapping) {
                        err = -ENODATA;
                        goto error;
                }

                /*
                 * Uh oh, read-error from disk.
                 */
                if (!PageUptodate(page)) {
                        err = -EIO;
                        goto error;
                }

                /*
                 * Page is ok after all, we are done.
                 */
                unlock_page(page);
        }

        return 0;
error:
        unlock_page(page);
        return err;
}

static struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .pin = page_cache_pipe_buf_pin,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
                                    struct pipe_buffer *buf)
{
        if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
                return 1;

        return generic_pipe_buf_steal(pipe, buf);
}

static struct pipe_buf_operations user_page_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .pin = generic_pipe_buf_pin,
        .release = page_cache_pipe_buf_release,
        .steal = user_page_pipe_buf_steal,
        .get = generic_pipe_buf_get,
};

/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 */
static ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
                              struct splice_pipe_desc *spd)
{
        int ret, do_wakeup, page_nr;

        ret = 0;
        do_wakeup = 0;
        page_nr = 0;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (!pipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (pipe->nrbufs < PIPE_BUFFERS) {
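                        /*
                         * The pipe buffer array is a power-of-two ring:
                         * curbuf is the first busy slot and nrbufs the
                         * number in use, so the next free slot is
                         * (curbuf + nrbufs) masked by the ring size.
                         */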
                        int newbuf = (pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1);
                        struct pipe_buffer *buf = pipe->bufs + newbuf;

                        buf->page = spd->pages[page_nr];
                        buf->offset = spd->partial[page_nr].offset;
                        buf->len = spd->partial[page_nr].len;
                        buf->ops = spd->ops;
                        if (spd->flags & SPLICE_F_GIFT)
                                buf->flags |= PIPE_BUF_FLAG_GIFT;

                        pipe->nrbufs++;
                        page_nr++;
                        ret += buf->len;

                        if (pipe->inode)
                                do_wakeup = 1;

                        if (!--spd->nr_pages)
                                break;
                        if (pipe->nrbufs < PIPE_BUFFERS)
                                continue;

                        break;
                }

                if (spd->flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
                        do_wakeup = 0;
                }

                pipe->waiting_writers++;
                pipe_wait(pipe);
                pipe->waiting_writers--;
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        }

        while (page_nr < spd->nr_pages)
                page_cache_release(spd->pages[page_nr++]);

        return ret;
}

static int
__generic_file_splice_read(struct file *in, loff_t *ppos,
                           struct pipe_inode_info *pipe, size_t len,
                           unsigned int flags)
{
        struct address_space *mapping = in->f_mapping;
        unsigned int loff, nr_pages;
        struct page *pages[PIPE_BUFFERS];
        struct partial_page partial[PIPE_BUFFERS];
        struct page *page;
        pgoff_t index, end_index;
        loff_t isize;
        size_t total_len;
        int error, page_nr;
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &page_cache_pipe_buf_ops,
        };

        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        nr_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        if (nr_pages > PIPE_BUFFERS)
                nr_pages = PIPE_BUFFERS;

        /*
         * Initiate read-ahead on this page range. However, don't call into
         * read-ahead if this is a non-zero offset (we are likely doing small
         * chunk splice and the page is already there) for a single page.
         */
        if (!loff || nr_pages > 1)
                page_cache_readahead(mapping, &in->f_ra, in, index, nr_pages);

        /*
         * Now fill in the holes:
         */
        error = 0;
        total_len = 0;

        /*
         * Lookup the (hopefully) full range of pages we need.
         */
        spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, pages);

        /*
         * If find_get_pages_contig() returned fewer pages than we needed,
         * allocate the rest.
         */
        index += spd.nr_pages;
        while (spd.nr_pages < nr_pages) {
                /*
                 * Page could be there, find_get_pages_contig() breaks on
                 * the first hole.
                 */
                page = find_get_page(mapping, index);
                if (!page) {
                        /*
                         * Make sure the read-ahead engine is notified
                         * about this failure.
                         */
                        handle_ra_miss(mapping, &in->f_ra, index);

                        /*
                         * page didn't exist, allocate one.
                         */
                        page = page_cache_alloc_cold(mapping);
                        if (!page)
                                break;

                        error = add_to_page_cache_lru(page, mapping, index,
                                        mapping_gfp_mask(mapping));
                        if (unlikely(error)) {
                                page_cache_release(page);
                                break;
                        }
                        /*
                         * add_to_page_cache() locks the page, unlock it
                         * to avoid convoluting the logic below even more.
                         */
                        unlock_page(page);
                }

                pages[spd.nr_pages++] = page;
                index++;
        }

        /*
         * Now loop over the map and see if we need to start IO on any
         * pages, fill in the partial map, etc.
         */
        index = *ppos >> PAGE_CACHE_SHIFT;
        nr_pages = spd.nr_pages;
        spd.nr_pages = 0;
        for (page_nr = 0; page_nr < nr_pages; page_nr++) {
                unsigned int this_len;

                if (!len)
                        break;

                /*
                 * this_len is the max we'll use from this page
                 */
                this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
                page = pages[page_nr];

                /*
                 * If the page isn't uptodate, we may need to start io on it
                 */
                if (!PageUptodate(page)) {
                        /*
                         * If in nonblock mode then don't block on waiting
                         * for an in-flight io page
                         */
                        if (flags & SPLICE_F_NONBLOCK)
                                break;

                        lock_page(page);

                        /*
                         * page was truncated, stop here. if this isn't the
                         * first page, we'll just complete what we already
                         * added
                         */
                        if (!page->mapping) {
                                unlock_page(page);
                                break;
                        }
                        /*
                         * page was already under io and is now done, great
                         */
                        if (PageUptodate(page)) {
                                unlock_page(page);
                                goto fill_it;
                        }

                        /*
                         * need to read in the page
                         */
                        error = mapping->a_ops->readpage(in, page);
                        if (unlikely(error)) {
                                /*
                                 * We really should re-lookup the page here,
                                 * but it complicates things a lot. Instead
                                 * lets just do what we already stored, and
                                 * we'll get it the next time we are called.
                                 */
                                if (error == AOP_TRUNCATED_PAGE)
                                        error = 0;

                                break;
                        }

                        /*
                         * i_size must be checked after ->readpage().
                         */
                        isize = i_size_read(mapping->host);
                        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
                        if (unlikely(!isize || index > end_index))
                                break;

                        /*
                         * if this is the last page, see if we need to shrink
                         * the length and stop
                         */
                        if (end_index == index) {
                                loff = PAGE_CACHE_SIZE - (isize & ~PAGE_CACHE_MASK);
                                if (total_len + loff > isize)
                                        break;
                                /*
                                 * force quit after adding this page
                                 */
                                len = this_len;
                                this_len = min(this_len, loff);
                                loff = 0;
                        }
                }
fill_it:
                partial[page_nr].offset = loff;
                partial[page_nr].len = this_len;
                len -= this_len;
                total_len += this_len;
                loff = 0;
                spd.nr_pages++;
                index++;
        }

        /*
         * Release any pages at the end, if we quit early. 'page_nr' is how
         * far we got, 'nr_pages' is how many pages are in the map.
         */
        while (page_nr < nr_pages)
                page_cache_release(pages[page_nr++]);

        if (spd.nr_pages)
                return splice_to_pipe(pipe, &spd);

        return error;
}

/**
 * generic_file_splice_read - splice data from file to a pipe
 * @in:		file to splice from
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will read pages from given file and fill them into a pipe.
 */
ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
                                 struct pipe_inode_info *pipe, size_t len,
                                 unsigned int flags)
{
        ssize_t spliced;
        int ret;

        ret = 0;
        spliced = 0;

        while (len) {
                ret = __generic_file_splice_read(in, ppos, pipe, len, flags);

                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (flags & SPLICE_F_NONBLOCK) {
                                ret = -EAGAIN;
                                break;
                        }
                }

                *ppos += ret;
                len -= ret;
                spliced += ret;
        }

        if (spliced)
                return spliced;

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_read);

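/*
 * From userspace this is the read side of splice(2) on a regular file
 * (illustrative sketch, assuming a libc splice() wrapper; otherwise go
 * through syscall(2)):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(file_fd, NULL, pfd[1], NULL, 16 * 4096, 0);
 */
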
/*
 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
 * using sendpage(). Return the number of bytes sent.
 */
static int pipe_to_sendpage(struct pipe_inode_info *info,
                            struct pipe_buffer *buf, struct splice_desc *sd)
{
        struct file *file = sd->file;
        loff_t pos = sd->pos;
        int ret, more;

        ret = buf->ops->pin(info, buf);
        if (!ret) {
                more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;

                ret = file->f_op->sendpage(file, buf->page, buf->offset,
                                           sd->len, &pos, more);
        }

        return ret;
}

/*
 * This is a little more tricky than the file -> pipe splicing. There are
 * basically three cases:
 *
 *	- Destination page already exists in the address space and there
 *	  are users of it. For that case we have no other option than
 *	  copying the data. Tough luck.
 *	- Destination page already exists in the address space, but there
 *	  are no users of it. Make sure it's uptodate, then drop it. Fall
 *	  through to last case.
 *	- Destination page does not exist, we can add the pipe page to
 *	  the page cache and avoid the copy.
 *
 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
 * sd->flags), we attempt to migrate pages from the pipe to the output
 * file address space page cache. This is possible if no one else has
 * the pipe page referenced outside of the pipe and page cache. If
 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
 * a new page in the output file page cache and fill/dirty that.
 */
static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        struct file *file = sd->file;
        struct address_space *mapping = file->f_mapping;
        gfp_t gfp_mask = mapping_gfp_mask(mapping);
        unsigned int offset, this_len;
        struct page *page;
        pgoff_t index;
        int ret;

        /*
         * make sure the data in this buffer is uptodate
         */
        ret = buf->ops->pin(info, buf);
        if (unlikely(ret))
                return ret;

        index = sd->pos >> PAGE_CACHE_SHIFT;
        offset = sd->pos & ~PAGE_CACHE_MASK;

        this_len = sd->len;
        if (this_len + offset > PAGE_CACHE_SIZE)
                this_len = PAGE_CACHE_SIZE - offset;

        /*
         * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
         * page.
         */
        if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
                /*
                 * If steal succeeds, buf->page is now pruned from the vm
                 * side (page cache) and we can reuse it. The page will also
                 * be locked on successful return.
                 */
                if (buf->ops->steal(info, buf))
                        goto find_page;

                page = buf->page;
                page_cache_get(page);

                /*
                 * page must be on the LRU for adding to the pagecache.
                 * Check this without grabbing the zone lock; if it isn't
                 * there, grab the zone lock, recheck, and add if necessary.
                 */
                if (!PageLRU(page)) {
                        struct zone *zone = page_zone(page);

                        spin_lock_irq(&zone->lru_lock);
                        if (!PageLRU(page)) {
                                SetPageLRU(page);
                                add_page_to_inactive_list(zone, page);
                        }
                        spin_unlock_irq(&zone->lru_lock);
                }

                if (add_to_page_cache(page, mapping, index, gfp_mask)) {
                        page_cache_release(page);
                        unlock_page(page);
                        goto find_page;
                }
        } else {
find_page:
                page = find_lock_page(mapping, index);
                if (!page) {
                        ret = -ENOMEM;
                        page = page_cache_alloc_cold(mapping);
                        if (unlikely(!page))
                                goto out_nomem;

                        /*
                         * This will also lock the page
                         */
                        ret = add_to_page_cache_lru(page, mapping, index,
                                                    gfp_mask);
                        if (unlikely(ret))
                                goto out;
                }

                /*
                 * We get here with the page locked. If the page is also
                 * uptodate, we don't need to do more. If it isn't, we
                 * may need to bring it in if we are not going to overwrite
                 * the full page.
                 */
                if (!PageUptodate(page)) {
                        if (this_len < PAGE_CACHE_SIZE) {
                                ret = mapping->a_ops->readpage(file, page);
                                if (unlikely(ret))
                                        goto out;

                                lock_page(page);

                                if (!PageUptodate(page)) {
                                        /*
                                         * Page got invalidated, repeat.
                                         */
                                        if (!page->mapping) {
                                                unlock_page(page);
                                                page_cache_release(page);
                                                goto find_page;
                                        }
                                        ret = -EIO;
                                        goto out;
                                }
                        } else
                                SetPageUptodate(page);
                }
        }

        ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
        if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        } else if (ret)
                goto out;

        if (buf->page != page) {
                /*
                 * Careful, ->map() uses KM_USER0!
                 */
                char *src = buf->ops->map(info, buf, 1);
                char *dst = kmap_atomic(page, KM_USER1);

                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
                kunmap_atomic(dst, KM_USER1);
                buf->ops->unmap(info, buf, src);
        }

        ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
        if (!ret) {
                /*
                 * Return the number of bytes written and mark page as
                 * accessed, we are now done!
                 */
                ret = this_len;
                mark_page_accessed(page);
                balance_dirty_pages_ratelimited(mapping);
        } else if (ret == AOP_TRUNCATED_PAGE) {
                page_cache_release(page);
                goto find_page;
        }

out:
        page_cache_release(page);
        unlock_page(page);
out_nomem:
        return ret;
}

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 */
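/*
 * Contract for the actor, as relied upon below: it is handed one pipe
 * buffer plus the splice_desc and returns the number of bytes it consumed
 * (zero or negative on error). Partial consumption is fine; the loop
 * below re-invokes it with buf->offset/buf->len advanced.
 */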
ssize_t splice_from_pipe(struct pipe_inode_info *pipe, struct file *out,
                         loff_t *ppos, size_t len, unsigned int flags,
                         splice_actor *actor)
{
        int ret, do_wakeup, err;
        struct splice_desc sd;

        ret = 0;
        do_wakeup = 0;

        sd.total_len = len;
        sd.flags = flags;
        sd.file = out;
        sd.pos = *ppos;

        if (pipe->inode)
                mutex_lock(&pipe->inode->i_mutex);

        for (;;) {
                if (pipe->nrbufs) {
                        struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
                        struct pipe_buf_operations *ops = buf->ops;

                        sd.len = buf->len;
                        if (sd.len > sd.total_len)
                                sd.len = sd.total_len;

                        err = actor(pipe, buf, &sd);
                        if (err <= 0) {
                                if (!ret && err != -ENODATA)
                                        ret = err;

                                break;
                        }

                        ret += err;
                        buf->offset += err;
                        buf->len -= err;

                        sd.len -= err;
                        sd.pos += err;
                        sd.total_len -= err;
                        if (sd.len)
                                continue;

                        if (!buf->len) {
                                buf->ops = NULL;
                                ops->release(pipe, buf);
                                pipe->curbuf = (pipe->curbuf + 1) & (PIPE_BUFFERS - 1);
                                pipe->nrbufs--;
                                if (pipe->inode)
                                        do_wakeup = 1;
                        }

                        if (!sd.total_len)
                                break;
                }

                if (pipe->nrbufs)
                        continue;
                if (!pipe->writers)
                        break;
                if (!pipe->waiting_writers) {
                        if (ret)
                                break;
                }

                if (flags & SPLICE_F_NONBLOCK) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (do_wakeup) {
                        smp_mb();
                        if (waitqueue_active(&pipe->wait))
                                wake_up_interruptible_sync(&pipe->wait);
                        kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
                        do_wakeup = 0;
                }

                pipe_wait(pipe);
        }

        if (pipe->inode)
                mutex_unlock(&pipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&pipe->wait))
                        wake_up_interruptible(&pipe->wait);
                kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
        }

        return ret;
}

/**
 * generic_file_splice_write - splice data from a pipe to a file
 * @pipe:	pipe info
 * @out:	file to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will either move or copy pages (determined by @flags options) from
 * the given pipe inode to the given file.
 *
 */
ssize_t
generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                          loff_t *ppos, size_t len, unsigned int flags)
{
        struct address_space *mapping = out->f_mapping;
        ssize_t ret;

        ret = splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_file);
        if (ret > 0) {
                struct inode *inode = mapping->host;

                *ppos += ret;

                /*
                 * If file or inode is SYNC and we actually wrote some data,
                 * sync it.
                 */
                if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
                        int err;

                        mutex_lock(&inode->i_mutex);
                        err = generic_osync_inode(inode, mapping,
                                                  OSYNC_METADATA|OSYNC_DATA);
                        mutex_unlock(&inode->i_mutex);

                        if (err)
                                ret = err;
                }
        }

        return ret;
}

EXPORT_SYMBOL(generic_file_splice_write);

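/*
 * A filesystem opts into splice by wiring these helpers into its
 * file_operations (illustrative sketch in the style of ext2; the other
 * members are elided):
 *
 *	struct file_operations ext2_file_operations = {
 *		...
 *		.splice_read	= generic_file_splice_read,
 *		.splice_write	= generic_file_splice_write,
 *	};
 */
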
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @pipe:	pipe to splice from
 * @out:	socket to write to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved.
 *
 */
ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, struct file *out,
                                loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);

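/*
 * Reached from userspace by splicing a pipe into a connected socket
 * (sketch). SPLICE_F_MORE hints that more data follows; note that
 * pipe_to_sendpage() above also sets 'more' itself for all but the last
 * chunk of the request:
 *
 *	splice(pfd[0], NULL, sock_fd, NULL, n, SPLICE_F_MORE);
 */
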
/*
 * Attempt to initiate a splice from pipe to file.
 */
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
                           loff_t *ppos, size_t len, unsigned int flags)
{
        int ret;

        if (unlikely(!out->f_op || !out->f_op->splice_write))
                return -EINVAL;

        if (unlikely(!(out->f_mode & FMODE_WRITE)))
                return -EBADF;

        ret = rw_verify_area(WRITE, out, ppos, len);
        if (unlikely(ret < 0))
                return ret;

        return out->f_op->splice_write(pipe, out, ppos, len, flags);
}

/*
 * Attempt to initiate a splice from a file to a pipe.
 */
static long do_splice_to(struct file *in, loff_t *ppos,
                         struct pipe_inode_info *pipe, size_t len,
                         unsigned int flags)
{
        loff_t isize, left;
        int ret;

        if (unlikely(!in->f_op || !in->f_op->splice_read))
                return -EINVAL;

        if (unlikely(!(in->f_mode & FMODE_READ)))
                return -EBADF;

        ret = rw_verify_area(READ, in, ppos, len);
        if (unlikely(ret < 0))
                return ret;

        isize = i_size_read(in->f_mapping->host);
        if (unlikely(*ppos >= isize))
                return 0;

        left = isize - *ppos;
        if (unlikely(left < len))
                len = left;

        return in->f_op->splice_read(in, ppos, pipe, len, flags);
}

long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;
        long ret, bytes;
        loff_t out_off;
        umode_t i_mode;
        int i;

        /*
         * We require the input being a regular file, as we don't want to
         * randomly drop data for eg socket -> socket splicing. Use the
         * piped splicing for that!
         */
        i_mode = in->f_dentry->d_inode->i_mode;
        if (unlikely(!S_ISREG(i_mode) && !S_ISBLK(i_mode)))
                return -EINVAL;

        /*
         * neither in nor out is a pipe, setup an internal pipe attached to
         * 'out' and transfer the wanted data from 'in' to 'out' through that
         */
        pipe = current->splice_pipe;
        if (unlikely(!pipe)) {
                pipe = alloc_pipe_info(NULL);
                if (!pipe)
                        return -ENOMEM;

                /*
                 * We don't have an immediate reader, but we'll read the stuff
                 * out of the pipe right after the splice_to_pipe(). So set
                 * PIPE_READERS appropriately.
                 */
                pipe->readers = 1;

                current->splice_pipe = pipe;
        }

        /*
         * Do the splice.
         */
        ret = 0;
        bytes = 0;
        out_off = 0;

        while (len) {
                size_t read_len, max_read_len;

                /*
                 * Do at most PIPE_BUFFERS pages worth of transfer:
                 */
                max_read_len = min(len, (size_t)(PIPE_BUFFERS*PAGE_SIZE));

                ret = do_splice_to(in, ppos, pipe, max_read_len, flags);
                if (unlikely(ret < 0))
                        goto out_release;

                read_len = ret;

                /*
                 * NOTE: nonblocking mode only applies to the input. We
                 * must not do the output in nonblocking mode as then we
                 * could get stuck data in the internal pipe:
                 */
                ret = do_splice_from(pipe, out, &out_off, read_len,
                                     flags & ~SPLICE_F_NONBLOCK);
                if (unlikely(ret < 0))
                        goto out_release;

                bytes += ret;
                len -= ret;

                /*
                 * In nonblocking mode, if we got back a short read then
                 * that was due to either an IO error or due to the
                 * pagecache entry not being there. In the IO error case
                 * the _next_ splice attempt will produce a clean IO error
                 * return value (not a short read), so in both cases it's
                 * correct to break out of the loop here:
                 */
                if ((flags & SPLICE_F_NONBLOCK) && (read_len < max_read_len))
                        break;
        }

        pipe->nrbufs = pipe->curbuf = 0;

        return bytes;

out_release:
        /*
         * If we did an incomplete transfer we must release
         * the pipe buffers in question:
         */
        for (i = 0; i < PIPE_BUFFERS; i++) {
                struct pipe_buffer *buf = pipe->bufs + i;

                if (buf->ops) {
                        buf->ops->release(pipe, buf);
                        buf->ops = NULL;
                }
        }
        pipe->nrbufs = pipe->curbuf = 0;

        /*
         * If we transferred some data, return the number of bytes:
         */
        if (bytes > 0)
                return bytes;

        return ret;
}

EXPORT_SYMBOL(do_splice_direct);

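/*
 * In effect this does, entirely inside the kernel, what the following
 * userspace loop would do (sketch), but reuses a single per-task pipe
 * (current->splice_pipe) as the intermediate buffer:
 *
 *	while (len) {
 *		ssize_t n = splice(in_fd, &pos, pfd[1], NULL, len, flags);
 *		splice(pfd[0], NULL, out_fd, NULL, n, flags & ~SPLICE_F_NONBLOCK);
 *		len -= n;
 *	}
 */
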
/*
 * Determine where to splice to/from.
 */
static long do_splice(struct file *in, loff_t __user *off_in,
                      struct file *out, loff_t __user *off_out,
                      size_t len, unsigned int flags)
{
        struct pipe_inode_info *pipe;
        loff_t offset, *off;
        long ret;

        pipe = in->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_in)
                        return -ESPIPE;
                if (off_out) {
                        if (out->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&offset, off_out, sizeof(loff_t)))
                                return -EFAULT;
                        off = &offset;
                } else
                        off = &out->f_pos;

                ret = do_splice_from(pipe, out, off, len, flags);

                if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
                        ret = -EFAULT;

                return ret;
        }

        pipe = out->f_dentry->d_inode->i_pipe;
        if (pipe) {
                if (off_out)
                        return -ESPIPE;
                if (off_in) {
                        if (in->f_op->llseek == no_llseek)
                                return -EINVAL;
                        if (copy_from_user(&offset, off_in, sizeof(loff_t)))
                                return -EFAULT;
                        off = &offset;
                } else
                        off = &in->f_pos;

                ret = do_splice_to(in, off, pipe, len, flags);

                if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
                        ret = -EFAULT;

                return ret;
        }

        return -EINVAL;
}

/*
 * Map an iov into an array of pages and offset/length tuples. With the
 * partial_page structure, we can map several non-contiguous ranges into
 * our one pages[] map instead of splitting that operation into pieces.
 * Could easily be exported as a generic helper for other users, in which
 * case one would probably want to add a 'max_nr_pages' parameter as well.
 */
static int get_iovec_page_array(const struct iovec __user *iov,
                                unsigned int nr_vecs, struct page **pages,
                                struct partial_page *partial, int aligned)
{
        int buffers = 0, error = 0;

        /*
         * It's ok to take the mmap_sem for reading, even
         * across a "get_user()".
         */
        down_read(&current->mm->mmap_sem);

        while (nr_vecs) {
                unsigned long off, npages;
                void __user *base;
                size_t len;
                int i;

                /*
                 * Get user address base and length for this iovec.
                 */
                error = get_user(base, &iov->iov_base);
                if (unlikely(error))
                        break;
                error = get_user(len, &iov->iov_len);
                if (unlikely(error))
                        break;

                /*
                 * Sanity check this iovec. 0 read succeeds.
                 */
                if (unlikely(!len))
                        break;
                error = -EFAULT;
                if (unlikely(!base))
                        break;

                /*
                 * Get this base offset and number of pages, then map
                 * in the user pages.
                 */
                off = (unsigned long) base & ~PAGE_MASK;

                /*
                 * If asked for alignment, the offset must be zero and the
                 * length a multiple of the PAGE_SIZE.
                 */
                error = -EINVAL;
                if (aligned && (off || len & ~PAGE_MASK))
                        break;

                npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (npages > PIPE_BUFFERS - buffers)
                        npages = PIPE_BUFFERS - buffers;

                error = get_user_pages(current, current->mm,
                                       (unsigned long) base, npages, 0, 0,
                                       &pages[buffers], NULL);

                if (unlikely(error <= 0))
                        break;

                /*
                 * Fill this contiguous range into the partial page map.
                 */
                for (i = 0; i < error; i++) {
                        const int plen = min_t(size_t, len, PAGE_SIZE - off);

                        partial[buffers].offset = off;
                        partial[buffers].len = plen;

                        off = 0;
                        len -= plen;
                        buffers++;
                }

                /*
                 * We didn't complete this iov, stop here since it probably
                 * means we have to move some of this into a pipe to
                 * be able to continue.
                 */
                if (len)
                        break;

                /*
                 * Don't continue if we mapped fewer pages than we asked for,
                 * or if we mapped the max number of pages that we have
                 * room for.
                 */
                if (error < npages || buffers == PIPE_BUFFERS)
                        break;

                nr_vecs--;
                iov++;
        }

        up_read(&current->mm->mmap_sem);

        if (buffers)
                return buffers;

        return error;
}

/*
 * vmsplice splices a user address range into a pipe. It can be thought of
 * as splice-from-memory, where the regular splice is splice-from-file (or
 * to file). In both cases the output is a pipe, naturally.
 *
 * Note that vmsplice only supports splicing _from_ user memory to a pipe,
 * not the other way around. Splicing from user memory is a simple operation
 * that can be supported without any funky alignment restrictions or nasty
 * vm tricks. We simply map in the user pages and fill them into a pipe.
 * The reverse isn't quite as easy, though. There are two possible solutions
 * for that:
 *
 *	- memcpy() the data internally, at which point we might as well just
 *	  do a regular read() on the buffer anyway.
 *	- Lots of nasty vm tricks, that are neither fast nor flexible (it
 *	  imposes restrictions on both ends of the pipe).
 *
 * Alas, it isn't here.
 *
 */
static long do_vmsplice(struct file *file, const struct iovec __user *iov,
                        unsigned long nr_segs, unsigned int flags)
{
        struct pipe_inode_info *pipe = file->f_dentry->d_inode->i_pipe;
        struct page *pages[PIPE_BUFFERS];
        struct partial_page partial[PIPE_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &user_page_pipe_buf_ops,
        };

        if (unlikely(!pipe))
                return -EBADF;
        if (unlikely(nr_segs > UIO_MAXIOV))
                return -EINVAL;
        else if (unlikely(!nr_segs))
                return 0;

        spd.nr_pages = get_iovec_page_array(iov, nr_segs, pages, partial,
                                            flags & SPLICE_F_GIFT);
        if (spd.nr_pages <= 0)
                return spd.nr_pages;

        return splice_to_pipe(pipe, &spd);
}

asmlinkage long sys_vmsplice(int fd, const struct iovec __user *iov,
                             unsigned long nr_segs, unsigned int flags)
{
        struct file *file;
        long error;
        int fput;

        error = -EBADF;
        file = fget_light(fd, &fput);
        if (file) {
                if (file->f_mode & FMODE_WRITE)
                        error = do_vmsplice(file, iov, nr_segs, flags);

                fput_light(file, fput);
        }

        return error;
}

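/*
 * Userspace usage sketch (assumes a libc vmsplice(2) wrapper). Gifting
 * page-aligned memory with SPLICE_F_GIFT allows a later SPLICE_F_MOVE
 * splice to steal the pages instead of copying them:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *	ssize_t n = vmsplice(pfd[1], &iov, 1, SPLICE_F_GIFT);
 */
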
asmlinkage long sys_splice(int fd_in, loff_t __user *off_in,
                           int fd_out, loff_t __user *off_out,
                           size_t len, unsigned int flags)
{
        long error;
        struct file *in, *out;
        int fput_in, fput_out;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fd_in, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        out = fget_light(fd_out, &fput_out);
                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_splice(in, off_in,
                                                          out, off_out,
                                                          len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}

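/*
 * Userspace usage sketch: copy one file to another through a pipe
 * (assumes a libc splice(2) wrapper; error handling and short writes
 * elided for brevity):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	for (;;) {
 *		ssize_t n = splice(in_fd, NULL, pfd[1], NULL, 65536, 0);
 *		if (n <= 0)
 *			break;
 *		splice(pfd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 *	}
 */
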
/*
 * Link contents of ipipe to opipe.
 */
static int link_pipe(struct pipe_inode_info *ipipe,
                     struct pipe_inode_info *opipe,
                     size_t len, unsigned int flags)
{
        struct pipe_buffer *ibuf, *obuf;
        int ret, do_wakeup, i, ipipe_first;

        ret = do_wakeup = ipipe_first = 0;

        /*
         * Potential ABBA deadlock, work around it by ordering lock
         * grabbing by inode address. Otherwise two different processes
         * could deadlock (one doing tee from A -> B, the other from B -> A).
         */
        if (ipipe->inode < opipe->inode) {
                ipipe_first = 1;
                mutex_lock(&ipipe->inode->i_mutex);
                mutex_lock(&opipe->inode->i_mutex);
        } else {
                mutex_lock(&opipe->inode->i_mutex);
                mutex_lock(&ipipe->inode->i_mutex);
        }

        for (i = 0;; i++) {
                if (!opipe->readers) {
                        send_sig(SIGPIPE, current, 0);
                        if (!ret)
                                ret = -EPIPE;
                        break;
                }

                if (ipipe->nrbufs - i) {
                        ibuf = ipipe->bufs + ((ipipe->curbuf + i) & (PIPE_BUFFERS - 1));

                        /*
                         * If we have room, fill this buffer
                         */
                        if (opipe->nrbufs < PIPE_BUFFERS) {
                                int nbuf = (opipe->curbuf + opipe->nrbufs) & (PIPE_BUFFERS - 1);

                                /*
                                 * Get a reference to this pipe buffer,
                                 * so we can copy the contents over.
                                 */
                                ibuf->ops->get(ipipe, ibuf);

                                obuf = opipe->bufs + nbuf;
                                *obuf = *ibuf;

                                /*
                                 * Don't inherit the gift flag, we need to
                                 * prevent multiple steals of this page.
                                 */
                                obuf->flags &= ~PIPE_BUF_FLAG_GIFT;

                                if (obuf->len > len)
                                        obuf->len = len;

                                opipe->nrbufs++;
                                do_wakeup = 1;
                                ret += obuf->len;
                                len -= obuf->len;

                                if (!len)
                                        break;
                                if (opipe->nrbufs < PIPE_BUFFERS)
                                        continue;
                        }

                        /*
                         * We have input available, but no output room.
                         * If we already copied data, return that. If we
                         * need to drop the opipe lock, it must be ordered
                         * last to avoid deadlocks.
                         */
                        if ((flags & SPLICE_F_NONBLOCK) || !ipipe_first) {
                                if (!ret)
                                        ret = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                if (!ret)
                                        ret = -ERESTARTSYS;
                                break;
                        }
                        if (do_wakeup) {
                                smp_mb();
                                if (waitqueue_active(&opipe->wait))
                                        wake_up_interruptible(&opipe->wait);
                                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
                                do_wakeup = 0;
                        }

                        opipe->waiting_writers++;
                        pipe_wait(opipe);
                        opipe->waiting_writers--;
                        continue;
                }

                /*
                 * No input buffers, do the usual checks for available
                 * writers and blocking and wait if necessary
                 */
                if (!ipipe->writers)
                        break;
                if (!ipipe->waiting_writers) {
                        if (ret)
                                break;
                }
                /*
                 * pipe_wait() drops the ipipe mutex. To avoid deadlocks
                 * with another process, we can only safely do that if
                 * the ipipe lock is ordered last.
                 */
                if ((flags & SPLICE_F_NONBLOCK) || ipipe_first) {
                        if (!ret)
                                ret = -EAGAIN;
                        break;
                }
                if (signal_pending(current)) {
                        if (!ret)
                                ret = -ERESTARTSYS;
                        break;
                }

                if (waitqueue_active(&ipipe->wait))
                        wake_up_interruptible_sync(&ipipe->wait);
                kill_fasync(&ipipe->fasync_writers, SIGIO, POLL_OUT);

                pipe_wait(ipipe);
        }

        mutex_unlock(&ipipe->inode->i_mutex);
        mutex_unlock(&opipe->inode->i_mutex);

        if (do_wakeup) {
                smp_mb();
                if (waitqueue_active(&opipe->wait))
                        wake_up_interruptible(&opipe->wait);
                kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN);
        }

        return ret;
}

/*
 * This is a tee(1) implementation that works on pipes. It doesn't copy
 * any data, it simply references the 'in' pages on the 'out' pipe.
 * The 'flags' used are the SPLICE_F_* variants, currently the only
 * applicable one is SPLICE_F_NONBLOCK.
 */
static long do_tee(struct file *in, struct file *out, size_t len,
                   unsigned int flags)
{
        struct pipe_inode_info *ipipe = in->f_dentry->d_inode->i_pipe;
        struct pipe_inode_info *opipe = out->f_dentry->d_inode->i_pipe;

        /*
         * Link the ipipe contents to the opipe; the data is referenced,
         * not consumed from ipipe.
         */
        if (ipipe && opipe)
                return link_pipe(ipipe, opipe, len, flags);

        return -EINVAL;
}

asmlinkage long sys_tee(int fdin, int fdout, size_t len, unsigned int flags)
{
        struct file *in;
        int error, fput_in;

        if (unlikely(!len))
                return 0;

        error = -EBADF;
        in = fget_light(fdin, &fput_in);
        if (in) {
                if (in->f_mode & FMODE_READ) {
                        int fput_out;
                        struct file *out = fget_light(fdout, &fput_out);

                        if (out) {
                                if (out->f_mode & FMODE_WRITE)
                                        error = do_tee(in, out, len, flags);
                                fput_light(out, fput_out);
                        }
                }

                fput_light(in, fput_in);
        }

        return error;
}

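/*
 * Userspace usage sketch (assumes a libc tee(2) wrapper), mirroring
 * tee(1): duplicate the data sitting in stdin's pipe to stdout's pipe
 * without consuming it, then drain stdin to a file with splice():
 *
 *	ssize_t n = tee(STDIN_FILENO, STDOUT_FILENO, 65536, SPLICE_F_NONBLOCK);
 *	if (n > 0)
 *		splice(STDIN_FILENO, NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */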