[PATCH] splice: page stealing needs to wait_on_page_writeback()
[linux-2.6.22.y-op.git] / fs / splice.c
blob b5fb2f3e3ac6c3525c6567f56288ea4553e2568d
1 /*
2 * "splice": joining two ropes together by interweaving their strands.
4 * This is the "extended pipe" functionality, where a pipe is used as
5 * an arbitrary in-memory buffer. Think of a pipe as a small kernel
6 * buffer that you can use to transfer data from one end to the other.
8 * The traditional unix read/write is extended with a "splice()" operation
9 * that transfers data buffers to or from a pipe buffer.
11 * Named by Larry McVoy, original implementation from Linus, extended by
12 * Jens to support splicing to files and fixing the initial implementation
13 * bugs.
15 * Copyright (C) 2005 Jens Axboe <axboe@suse.de>
16 * Copyright (C) 2005 Linus Torvalds <torvalds@osdl.org>
19 #include <linux/fs.h>
20 #include <linux/file.h>
21 #include <linux/pagemap.h>
22 #include <linux/pipe_fs_i.h>
23 #include <linux/mm_inline.h>
24 #include <linux/swap.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h>
27 #include <linux/module.h>
28 #include <linux/syscalls.h>
/*
 * Passed to the actors: per-call state describing how much data to move
 * and where it goes. See pipe_to_file()/pipe_to_sendpage().
 */
struct splice_desc {
	unsigned int len, total_len;	/* current and remaining length */
	unsigned int flags;		/* splice flags */
	struct file *file;		/* file to read/write */
	loff_t pos;			/* file position */
};
41 * Attempt to steal a page from a pipe buffer. This should perhaps go into
42 * a vm helper function, it's already simplified quite a bit by the
43 * addition of remove_mapping(). If success is returned, the caller may
44 * attempt to reuse this page for another destination.
46 static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
47 struct pipe_buffer *buf)
49 struct page *page = buf->page;
50 struct address_space *mapping = page_mapping(page);
52 WARN_ON(!PageLocked(page));
53 WARN_ON(!PageUptodate(page));
56 * At least for ext2 with nobh option, we need to wait on writeback
57 * completing on this page, since we'll remove it from the pagecache.
58 * Otherwise truncate wont wait on the page, allowing the disk
59 * blocks to be reused by someone else before we actually wrote our
60 * data to them. fs corruption ensues.
62 wait_on_page_writeback(page);
64 if (PagePrivate(page))
65 try_to_release_page(page, mapping_gfp_mask(mapping));
67 if (!remove_mapping(mapping, page))
68 return 1;
70 if (PageLRU(page)) {
71 struct zone *zone = page_zone(page);
73 spin_lock_irq(&zone->lru_lock);
74 BUG_ON(!PageLRU(page));
75 __ClearPageLRU(page);
76 del_page_from_lru(zone, page);
77 spin_unlock_irq(&zone->lru_lock);
80 return 0;
83 static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
84 struct pipe_buffer *buf)
86 page_cache_release(buf->page);
87 buf->page = NULL;
90 static void *page_cache_pipe_buf_map(struct file *file,
91 struct pipe_inode_info *info,
92 struct pipe_buffer *buf)
94 struct page *page = buf->page;
96 lock_page(page);
98 if (!PageUptodate(page)) {
99 unlock_page(page);
100 return ERR_PTR(-EIO);
103 if (!page->mapping) {
104 unlock_page(page);
105 return ERR_PTR(-ENODATA);
108 return kmap(buf->page);
111 static void page_cache_pipe_buf_unmap(struct pipe_inode_info *info,
112 struct pipe_buffer *buf)
114 unlock_page(buf->page);
115 kunmap(buf->page);
/*
 * Buffer operations for pipe buffers whose pages originate from the
 * page cache: they cannot be merged with adjacent writes, must be
 * locked while mapped, and may be stolen for zero-copy moves.
 */
static struct pipe_buf_operations page_cache_pipe_buf_ops = {
	.can_merge = 0,
	.map = page_cache_pipe_buf_map,
	.unmap = page_cache_pipe_buf_unmap,
	.release = page_cache_pipe_buf_release,
	.steal = page_cache_pipe_buf_steal,
};
/*
 * Pipe output worker. This sets up our pipe format with the page cache
 * pipe buffer operations. Otherwise very similar to the regular pipe_writev().
 *
 * Fills pipe buffers from pages[0..nr_pages), starting at 'offset' into
 * the first page, for at most 'len' bytes. Returns bytes queued, or a
 * negative errno if nothing was queued. Pages not consumed (i.e. past
 * the point where the pipe filled up or an error hit) have their
 * references dropped before returning.
 */
static ssize_t move_to_pipe(struct inode *inode, struct page **pages,
			    int nr_pages, unsigned long offset,
			    unsigned long len, unsigned int flags)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, i;

	ret = 0;
	do_wakeup = 0;
	i = 0;		/* index of next page to hand to the pipe */

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs;

		/* no readers left: writing would never be consumed */
		if (!PIPE_READERS(*inode)) {
			send_sig(SIGPIPE, current, 0);
			if (!ret)
				ret = -EPIPE;
			break;
		}

		bufs = info->nrbufs;
		if (bufs < PIPE_BUFFERS) {
			/* there is a free slot: install the next page */
			int newbuf = (info->curbuf + bufs) & (PIPE_BUFFERS - 1);
			struct pipe_buffer *buf = info->bufs + newbuf;
			struct page *page = pages[i++];
			unsigned long this_len;

			/* only the first page uses a non-zero offset */
			this_len = PAGE_CACHE_SIZE - offset;
			if (this_len > len)
				this_len = len;

			buf->page = page;
			buf->offset = offset;
			buf->len = this_len;
			buf->ops = &page_cache_pipe_buf_ops;
			info->nrbufs = ++bufs;
			do_wakeup = 1;

			ret += this_len;
			len -= this_len;
			offset = 0;
			if (!--nr_pages)
				break;
			if (!len)
				break;
			if (bufs < PIPE_BUFFERS)
				continue;

			/* pipe now full; fall through to blocking/exit */
			break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake readers before we sleep so they can drain the pipe */
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO,
				    POLL_IN);
			do_wakeup = 0;
		}

		PIPE_WAITING_WRITERS(*inode)++;
		pipe_wait(inode);
		PIPE_WAITING_WRITERS(*inode)--;
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_READERS(*inode), SIGIO, POLL_IN);
	}

	/* drop references on pages we never handed to the pipe */
	while (i < nr_pages)
		page_cache_release(pages[i++]);

	return ret;
}
222 static int __generic_file_splice_read(struct file *in, struct inode *pipe,
223 size_t len, unsigned int flags)
225 struct address_space *mapping = in->f_mapping;
226 unsigned int offset, nr_pages;
227 struct page *pages[PIPE_BUFFERS], *shadow[PIPE_BUFFERS];
228 struct page *page;
229 pgoff_t index, pidx;
230 int i, j;
232 index = in->f_pos >> PAGE_CACHE_SHIFT;
233 offset = in->f_pos & ~PAGE_CACHE_MASK;
234 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
236 if (nr_pages > PIPE_BUFFERS)
237 nr_pages = PIPE_BUFFERS;
240 * initiate read-ahead on this page range
242 do_page_cache_readahead(mapping, in, index, nr_pages);
245 * Get as many pages from the page cache as possible..
246 * Start IO on the page cache entries we create (we
247 * can assume that any pre-existing ones we find have
248 * already had IO started on them).
250 i = find_get_pages(mapping, index, nr_pages, pages);
253 * common case - we found all pages and they are contiguous,
254 * kick them off
256 if (i && (pages[i - 1]->index == index + i - 1))
257 goto splice_them;
260 * fill shadow[] with pages at the right locations, so we only
261 * have to fill holes
263 memset(shadow, 0, nr_pages * sizeof(struct page *));
264 for (j = 0; j < i; j++)
265 shadow[pages[j]->index - index] = pages[j];
268 * now fill in the holes
270 for (i = 0, pidx = index; i < nr_pages; pidx++, i++) {
271 int error;
273 if (shadow[i])
274 continue;
277 * no page there, look one up / create it
279 page = find_or_create_page(mapping, pidx,
280 mapping_gfp_mask(mapping));
281 if (!page)
282 break;
284 if (PageUptodate(page))
285 unlock_page(page);
286 else {
287 error = mapping->a_ops->readpage(in, page);
289 if (unlikely(error)) {
290 page_cache_release(page);
291 break;
294 shadow[i] = page;
297 if (!i) {
298 for (i = 0; i < nr_pages; i++) {
299 if (shadow[i])
300 page_cache_release(shadow[i]);
302 return 0;
305 memcpy(pages, shadow, i * sizeof(struct page *));
308 * Now we splice them into the pipe..
310 splice_them:
311 return move_to_pipe(pipe, pages, i, offset, len, flags);
315 * generic_file_splice_read - splice data from file to a pipe
316 * @in: file to splice from
317 * @pipe: pipe to splice to
318 * @len: number of bytes to splice
319 * @flags: splice modifier flags
321 * Will read pages from given file and fill them into a pipe.
324 ssize_t generic_file_splice_read(struct file *in, struct inode *pipe,
325 size_t len, unsigned int flags)
327 ssize_t spliced;
328 int ret;
330 ret = 0;
331 spliced = 0;
332 while (len) {
333 ret = __generic_file_splice_read(in, pipe, len, flags);
335 if (ret <= 0)
336 break;
338 in->f_pos += ret;
339 len -= ret;
340 spliced += ret;
342 if (!(flags & SPLICE_F_NONBLOCK))
343 continue;
344 ret = -EAGAIN;
345 break;
348 if (spliced)
349 return spliced;
351 return ret;
354 EXPORT_SYMBOL(generic_file_splice_read);
357 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
358 * using sendpage().
360 static int pipe_to_sendpage(struct pipe_inode_info *info,
361 struct pipe_buffer *buf, struct splice_desc *sd)
363 struct file *file = sd->file;
364 loff_t pos = sd->pos;
365 unsigned int offset;
366 ssize_t ret;
367 void *ptr;
368 int more;
371 * sub-optimal, but we are limited by the pipe ->map. we don't
372 * need a kmap'ed buffer here, we just want to make sure we
373 * have the page pinned if the pipe page originates from the
374 * page cache
376 ptr = buf->ops->map(file, info, buf);
377 if (IS_ERR(ptr))
378 return PTR_ERR(ptr);
380 offset = pos & ~PAGE_CACHE_MASK;
381 more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
383 ret = file->f_op->sendpage(file, buf->page, offset, sd->len, &pos,more);
385 buf->ops->unmap(info, buf);
386 if (ret == sd->len)
387 return 0;
389 return -EIO;
393 * This is a little more tricky than the file -> pipe splicing. There are
394 * basically three cases:
396 * - Destination page already exists in the address space and there
397 * are users of it. For that case we have no other option that
398 * copying the data. Tough luck.
399 * - Destination page already exists in the address space, but there
400 * are no users of it. Make sure it's uptodate, then drop it. Fall
401 * through to last case.
402 * - Destination page does not exist, we can add the pipe page to
403 * the page cache and avoid the copy.
405 * If asked to move pages to the output file (SPLICE_F_MOVE is set in
406 * sd->flags), we attempt to migrate pages from the pipe to the output
407 * file address space page cache. This is possible if no one else has
408 * the pipe page referenced outside of the pipe and page cache. If
409 * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
410 * a new page in the output file page cache and fill/dirty that.
412 static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
413 struct splice_desc *sd)
415 struct file *file = sd->file;
416 struct address_space *mapping = file->f_mapping;
417 unsigned int offset;
418 struct page *page;
419 pgoff_t index;
420 char *src;
421 int ret, stolen;
424 * after this, page will be locked and unmapped
426 src = buf->ops->map(file, info, buf);
427 if (IS_ERR(src))
428 return PTR_ERR(src);
430 index = sd->pos >> PAGE_CACHE_SHIFT;
431 offset = sd->pos & ~PAGE_CACHE_MASK;
432 stolen = 0;
435 * reuse buf page, if SPLICE_F_MOVE is set
437 if (sd->flags & SPLICE_F_MOVE) {
439 * If steal succeeds, buf->page is now pruned from the vm
440 * side (LRU and page cache) and we can reuse it.
442 if (buf->ops->steal(info, buf))
443 goto find_page;
445 page = buf->page;
446 stolen = 1;
447 if (add_to_page_cache_lru(page, mapping, index,
448 mapping_gfp_mask(mapping)))
449 goto find_page;
450 } else {
451 find_page:
452 ret = -ENOMEM;
453 page = find_or_create_page(mapping, index,
454 mapping_gfp_mask(mapping));
455 if (!page)
456 goto out;
459 * If the page is uptodate, it is also locked. If it isn't
460 * uptodate, we can mark it uptodate if we are filling the
461 * full page. Otherwise we need to read it in first...
463 if (!PageUptodate(page)) {
464 if (sd->len < PAGE_CACHE_SIZE) {
465 ret = mapping->a_ops->readpage(file, page);
466 if (unlikely(ret))
467 goto out;
469 lock_page(page);
471 if (!PageUptodate(page)) {
473 * page got invalidated, repeat
475 if (!page->mapping) {
476 unlock_page(page);
477 page_cache_release(page);
478 goto find_page;
480 ret = -EIO;
481 goto out;
483 } else {
484 WARN_ON(!PageLocked(page));
485 SetPageUptodate(page);
490 ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
491 if (ret == AOP_TRUNCATED_PAGE) {
492 page_cache_release(page);
493 goto find_page;
494 } else if (ret)
495 goto out;
497 if (!stolen) {
498 char *dst = kmap_atomic(page, KM_USER0);
500 memcpy(dst + offset, src + buf->offset, sd->len);
501 flush_dcache_page(page);
502 kunmap_atomic(dst, KM_USER0);
505 ret = mapping->a_ops->commit_write(file, page, 0, sd->len);
506 if (ret == AOP_TRUNCATED_PAGE) {
507 page_cache_release(page);
508 goto find_page;
509 } else if (ret)
510 goto out;
512 balance_dirty_pages_ratelimited(mapping);
513 out:
514 if (!stolen) {
515 page_cache_release(page);
516 unlock_page(page);
518 buf->ops->unmap(info, buf);
519 return ret;
/* Actor moving one pipe buffer's worth of data to its destination. */
typedef int (splice_actor)(struct pipe_inode_info *, struct pipe_buffer *,
			   struct splice_desc *);

/*
 * Pipe input worker. Most of this logic works like a regular pipe, the
 * key here is the 'actor' worker passed in that actually moves the data
 * to the wanted destination. See pipe_to_file/pipe_to_sendpage above.
 *
 * Consumes buffers from the pipe ring until 'len' bytes have been
 * handled, the pipe is drained with no writers, or an error occurs.
 * Returns bytes moved, or a negative errno when nothing was moved.
 */
static ssize_t move_from_pipe(struct inode *inode, struct file *out,
			      size_t len, unsigned int flags,
			      splice_actor *actor)
{
	struct pipe_inode_info *info;
	int ret, do_wakeup, err;
	struct splice_desc sd;

	ret = 0;
	do_wakeup = 0;

	sd.total_len = len;
	sd.flags = flags;
	sd.file = out;
	sd.pos = out->f_pos;

	mutex_lock(PIPE_MUTEX(*inode));

	info = inode->i_pipe;
	for (;;) {
		int bufs = info->nrbufs;

		if (bufs) {
			int curbuf = info->curbuf;
			struct pipe_buffer *buf = info->bufs + curbuf;
			struct pipe_buf_operations *ops = buf->ops;

			/* never move more than the caller asked for */
			sd.len = buf->len;
			if (sd.len > sd.total_len)
				sd.len = sd.total_len;

			err = actor(info, buf, &sd);
			if (err) {
				/* -ENODATA means retryable; keep prior ret */
				if (!ret && err != -ENODATA)
					ret = err;

				break;
			}

			ret += sd.len;
			buf->offset += sd.len;
			buf->len -= sd.len;
			if (!buf->len) {
				/* buffer fully consumed: release slot */
				buf->ops = NULL;
				ops->release(info, buf);
				curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
				info->curbuf = curbuf;
				info->nrbufs = --bufs;
				do_wakeup = 1;
			}

			sd.pos += sd.len;
			sd.total_len -= sd.len;
			if (!sd.total_len)
				break;
		}

		if (bufs)
			continue;
		if (!PIPE_WRITERS(*inode))
			break;
		if (!PIPE_WAITING_WRITERS(*inode)) {
			/* pipe is empty and no writer queued: stop if we
			 * already moved something */
			if (ret)
				break;
		}

		if (flags & SPLICE_F_NONBLOCK) {
			if (!ret)
				ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			if (!ret)
				ret = -ERESTARTSYS;
			break;
		}

		/* wake writers before sleeping so they can refill the pipe */
		if (do_wakeup) {
			wake_up_interruptible_sync(PIPE_WAIT(*inode));
			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
			do_wakeup = 0;
		}

		pipe_wait(inode);
	}

	mutex_unlock(PIPE_MUTEX(*inode));

	if (do_wakeup) {
		wake_up_interruptible(PIPE_WAIT(*inode));
		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
	}

	/* update the output file position under its inode mutex */
	mutex_lock(&out->f_mapping->host->i_mutex);
	out->f_pos = sd.pos;
	mutex_unlock(&out->f_mapping->host->i_mutex);
	return ret;
}
632 * generic_file_splice_write - splice data from a pipe to a file
633 * @inode: pipe inode
634 * @out: file to write to
635 * @len: number of bytes to splice
636 * @flags: splice modifier flags
638 * Will either move or copy pages (determined by @flags options) from
639 * the given pipe inode to the given file.
642 ssize_t generic_file_splice_write(struct inode *inode, struct file *out,
643 size_t len, unsigned int flags)
645 struct address_space *mapping = out->f_mapping;
646 ssize_t ret = move_from_pipe(inode, out, len, flags, pipe_to_file);
649 * if file or inode is SYNC and we actually wrote some data, sync it
651 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(mapping->host))
652 && ret > 0) {
653 struct inode *inode = mapping->host;
654 int err;
656 mutex_lock(&inode->i_mutex);
657 err = generic_osync_inode(mapping->host, mapping,
658 OSYNC_METADATA|OSYNC_DATA);
659 mutex_unlock(&inode->i_mutex);
661 if (err)
662 ret = err;
665 return ret;
668 EXPORT_SYMBOL(generic_file_splice_write);
/**
 * generic_splice_sendpage - splice data from a pipe to a socket
 * @inode: pipe inode
 * @out: socket to write to
 * @len: number of bytes to splice
 * @flags: splice modifier flags
 *
 * Will send @len bytes from the pipe to a network socket. No data copying
 * is involved. Thin wrapper: all the work is done by move_from_pipe()
 * with the pipe_to_sendpage actor.
 */
ssize_t generic_splice_sendpage(struct inode *inode, struct file *out,
				size_t len, unsigned int flags)
{
	return move_from_pipe(inode, out, len, flags, pipe_to_sendpage);
}

EXPORT_SYMBOL(generic_splice_sendpage);
690 * Attempt to initiate a splice from pipe to file.
692 static long do_splice_from(struct inode *pipe, struct file *out, size_t len,
693 unsigned int flags)
695 loff_t pos;
696 int ret;
698 if (!out->f_op || !out->f_op->splice_write)
699 return -EINVAL;
701 if (!(out->f_mode & FMODE_WRITE))
702 return -EBADF;
704 pos = out->f_pos;
705 ret = rw_verify_area(WRITE, out, &pos, len);
706 if (unlikely(ret < 0))
707 return ret;
709 return out->f_op->splice_write(pipe, out, len, flags);
713 * Attempt to initiate a splice from a file to a pipe.
715 static long do_splice_to(struct file *in, struct inode *pipe, size_t len,
716 unsigned int flags)
718 loff_t pos, isize, left;
719 int ret;
721 if (!in->f_op || !in->f_op->splice_read)
722 return -EINVAL;
724 if (!(in->f_mode & FMODE_READ))
725 return -EBADF;
727 pos = in->f_pos;
728 ret = rw_verify_area(READ, in, &pos, len);
729 if (unlikely(ret < 0))
730 return ret;
732 isize = i_size_read(in->f_mapping->host);
733 if (unlikely(in->f_pos >= isize))
734 return 0;
736 left = isize - in->f_pos;
737 if (left < len)
738 len = left;
740 return in->f_op->splice_read(in, pipe, len, flags);
744 * Determine where to splice to/from.
746 static long do_splice(struct file *in, struct file *out, size_t len,
747 unsigned int flags)
749 struct inode *pipe;
751 pipe = in->f_dentry->d_inode;
752 if (pipe->i_pipe)
753 return do_splice_from(pipe, out, len, flags);
755 pipe = out->f_dentry->d_inode;
756 if (pipe->i_pipe)
757 return do_splice_to(in, pipe, len, flags);
759 return -EINVAL;
762 asmlinkage long sys_splice(int fdin, int fdout, size_t len, unsigned int flags)
764 long error;
765 struct file *in, *out;
766 int fput_in, fput_out;
768 if (unlikely(!len))
769 return 0;
771 error = -EBADF;
772 in = fget_light(fdin, &fput_in);
773 if (in) {
774 if (in->f_mode & FMODE_READ) {
775 out = fget_light(fdout, &fput_out);
776 if (out) {
777 if (out->f_mode & FMODE_WRITE)
778 error = do_splice(in, out, len, flags);
779 fput_light(out, fput_out);
783 fput_light(in, fput_in);
786 return error;