/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include "filemap.h"
#include "internal.h"

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);
/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */
/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(vmtruncate)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_mutex			(msync)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  ->inode_lock
 *    ->sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->zone.lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock		(proc_pid_lookup)
 */
/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}
static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}
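
/*
 * Illustrative sketch (not part of the original file): sync_page() above
 * gets handed a pointer to page->flags by the wait-bit machinery and
 * recovers the enclosing struct page with container_of().  The same idiom
 * in miniature, on a hypothetical example struct:
 */
struct example_holder {
	unsigned long flags;	/* the member whose address we are given */
	int payload;
};

static int example_recover(unsigned long *word)
{
	struct example_holder *h =
		container_of(word, struct example_holder, flags);
	return h->payload;	/* back from one member to the full object */
}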
/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends
 * @sync_mode:	enable synchronous operation
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
static int __filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.start = start,
		.end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}
static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
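
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * choose between the two flavours above.  A data-integrity sync must start
 * I/O against every dirty page (WB_SYNC_ALL via filemap_fdatawrite()),
 * while background flushing may skip busy pages (WB_SYNC_NONE via
 * filemap_flush()).  example_flush() is a hypothetical helper:
 */
static int example_flush(struct address_space *mapping, int for_sync)
{
	if (for_sync)
		return filemap_fdatawrite(mapping);	/* data integrity */
	return filemap_flush(mapping);			/* best effort */
}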
/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
static int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}
/*
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);
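
/*
 * Illustrative sketch (not part of the original file): this mirrors how
 * generic_file_aio_write() near the bottom of this file drives the helper
 * above for O_SYNC writes - write the data, then sync just the byte range
 * that was written.  example_osync_write_tail() is hypothetical:
 */
static ssize_t example_osync_write_tail(struct inode *inode,
		struct address_space *mapping, loff_t pos, ssize_t written)
{
	if (written > 0) {
		ssize_t err = sync_page_range(inode, mapping, pos, written);
		if (err < 0)
			written = err;
	}
	return written;
}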
/*
 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);
/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 *     address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);
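
/*
 * Illustrative sketch (not part of the original file): a minimal
 * fsync-style helper.  Flushing a file's data is just a call against its
 * address_space; example_flush_file_data() is hypothetical:
 */
static int example_flush_file_data(struct file *file)
{
	return filemap_write_and_wait(file->f_mapping);
}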
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}
/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}
EXPORT_SYMBOL(add_to_page_cache);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}
/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}
void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);
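
/*
 * Illustrative sketch (not part of the original file): the familiar
 * wait_on_page_writeback() helper in <linux/pagemap.h> is a thin wrapper
 * around wait_on_page_bit(), along these lines:
 */
static inline void example_wait_on_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}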
/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);
/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);
/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_get_page);
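
/*
 * Illustrative sketch (not part of the original file): the reference taken
 * by find_get_page() must be dropped with page_cache_release() once the
 * caller is done.  example_page_is_cached() is hypothetical:
 */
static int example_page_is_cached(struct address_space *mapping,
		unsigned long offset)
{
	struct page *page = find_get_page(mapping, offset);

	if (!page)
		return 0;
	page_cache_release(page);	/* drop the reference we took */
	return 1;
}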
/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && TestSetPageLocked(page))
		page = NULL;
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_trylock_page);
/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);
			read_lock_irq(&mapping->tree_lock);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping ||
				     page->index != offset)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	read_unlock_irq(&mapping->tree_lock);
	return page;
}
EXPORT_SYMBOL(find_lock_page);
/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_flags specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		unsigned long index, gfp_t gfp_mask)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = alloc_page(gfp_mask);
			if (!cached_page)
				return NULL;
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, gfp_mask);
		if (!err) {
			page = cached_page;
			cached_page = NULL;
		} else if (err == -EEXIST)
			goto repeat;
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}
EXPORT_SYMBOL(find_or_create_page);
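
/*
 * Illustrative sketch (not part of the original file): the common
 * grab_cache_page() wrapper in <linux/pagemap.h> is essentially this,
 * using the mapping's own allocation mask:
 */
static inline struct page *example_grab_cache_page(
		struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}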
/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
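
/*
 * Illustrative sketch (not part of the original file): a typical
 * gang-lookup loop.  Each returned page carries a reference that the
 * caller must drop; example_walk_pages() is hypothetical:
 */
static void example_walk_pages(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned i, nr;

	while ((nr = find_get_pages(mapping, index, 16, pages)) != 0) {
		index = pages[nr - 1]->index + 1;	/* advance first */
		for (i = 0; i < nr; i++)
			page_cache_release(pages[i]);
	}
}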
/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'.   We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	gfp_t gfp_mask;

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
	page = alloc_pages(gfp_mask, 0);
	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}
EXPORT_SYMBOL(grab_cache_page_nowait);
/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.  It may be
 * NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *_ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index;
	unsigned long end_index;
	unsigned long offset;
	unsigned long last_index;
	unsigned long next_index;
	unsigned long prev_index;
	loff_t isize;
	struct page *cached_page;
	int error;
	struct file_ra_state ra = *_ra;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	next_index = index;
	prev_index = ra.prev_page;
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;
	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		cond_resched();
		if (index == next_index)
			next_index = page_cache_readahead(mapping, &ra, filp,
					index, last_index - index);

find_page:
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			handle_ra_miss(mapping, &ra, index);
			goto no_cached_page;
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When (part of) the same page is read multiple times
		 * in succession, only mark it as accessed the first time.
		 */
		if (prev_index != index)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;
page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		/*
		 * i_size must be checked after we have done ->readpage.
		 *
		 * Checking i_size after the readpage allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;
		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;
no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				goto out;
			}
		}
		error = add_to_page_cache_lru(cached_page, mapping,
						index, GFP_KERNEL);
		if (error) {
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		page = cached_page;
		cached_page = NULL;
		goto readpage;
	}

out:
	*_ra = ra;

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
	if (filp)
		file_accessed(filp);
}
EXPORT_SYMBOL(do_generic_mapping_read);
int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}
/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		goto out;
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}
EXPORT_SYMBOL(__generic_file_aio_read);
ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
EXPORT_SYMBOL(generic_file_aio_read);
ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_read);
int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = desc->arg.data;

	if (size > count)
		size = count;

	written = file->f_op->sendpage(file, page, offset,
					size, &file->f_pos, size<count);
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}
ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_generic_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}
EXPORT_SYMBOL(generic_file_sendfile);
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     unsigned long index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}
asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}
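
/*
 * Illustrative note (not part of the original file): from userspace the
 * syscall above is reached through the readahead(2) wrapper, e.g.:
 *
 *	#include <fcntl.h>
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	readahead(fd, 0, 1 << 20);	// prefetch the first 1MB
 */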
#ifdef CONFIG_MMU
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)
/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
				unsigned long address, int *type)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;
	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;
outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;
page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}
EXPORT_SYMBOL(filemap_nopage);
static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
					int nonblock)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		if (nonblock)
			return NULL;
		goto no_cached_page;
	}

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page)) {
		if (nonblock) {
			page_cache_release(page);
			return NULL;
		}
		goto page_not_uptodate;
	}

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	return page;
no_cached_page:
	error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}
	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
err:
	page_cache_release(page);

	return NULL;
}
int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long size;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	if (!nonblock)
		force_page_cache_readahead(mapping, vma->vm_file,
					pgoff, len >> PAGE_CACHE_SHIFT);

repeat:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
		return -EINVAL;

	page = filemap_getpage(file, pgoff, nonblock);

	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
	 * done in shmem_populate calling shmem_getpage */
	if (!page && !nonblock)
		return -ENOMEM;

	if (page) {
		err = install_page(mm, vma, addr, page, prot);
		if (err) {
			page_cache_release(page);
			return err;
		}
	} else if (vma->vm_flags & VM_NONLINEAR) {
		/* No page was found just because we can't read it in now (being
		 * here implies nonblock != 0), but the page may exist, so set
		 * the PTE to fault it in later. */
		err = install_file_pte(mm, vma, addr, pgoff, prot);
		if (err)
			return err;
	}

	len -= PAGE_SIZE;
	addr += PAGE_SIZE;
	pgoff++;
	if (len)
		goto repeat;

	return 0;
}
EXPORT_SYMBOL(filemap_populate);
struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
static inline struct page *__read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page)
				return ERR_PTR(-ENOMEM);
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err < 0) {
			/* Presumably ENOMEM for radix tree node */
			page_cache_release(cached_page);
			return ERR_PTR(err);
		}
		page = cached_page;
		cached_page = NULL;
		err = filler(data, page);
		if (err < 0) {
			page_cache_release(page);
			page = ERR_PTR(err);
		}
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}
/*
 * Read into the page cache. If a page already exists,
 * and PageUptodate() is not set, try to fill the page.
 */
struct page *read_cache_page(struct address_space *mapping,
				unsigned long index,
				int (*filler)(void *,struct page*),
				void *data)
{
	struct page *page;
	int err;

retry:
	page = __read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto out;
	mark_page_accessed(page);
	if (PageUptodate(page))
		goto out;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry;
	}
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	err = filler(data, page);
	if (err < 0) {
		page_cache_release(page);
		page = ERR_PTR(err);
	}
out:
	return page;
}
EXPORT_SYMBOL(read_cache_page);
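
/*
 * Illustrative sketch (not part of the original file): a common pattern in
 * filesystem code is to use the mapping's own ->readpage as the filler.
 * example_read_mapping_page() is hypothetical:
 */
static struct page *example_read_mapping_page(struct address_space *mapping,
		unsigned long index, void *data)
{
	return read_cache_page(mapping, index,
			(filler_t *)mapping->a_ops->readpage, data);
}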
/*
 * If the page was newly created, increment its refcount and add it to the
 * caller's lru-buffering pagevec.  This function is specifically for
 * generic_file_write().
 */
static inline struct page *
__grab_cache_page(struct address_space *mapping, unsigned long index,
			struct page **cached_page, struct pagevec *lru_pvec)
{
	int err;
	struct page *page;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!*cached_page) {
			*cached_page = page_cache_alloc(mapping);
			if (!*cached_page)
				return NULL;
		}
		err = add_to_page_cache(*cached_page, mapping,
					index, GFP_KERNEL);
		if (err == -EEXIST)
			goto repeat;
		if (err == 0) {
			page = *cached_page;
			page_cache_get(page);
			if (!pagevec_add(lru_pvec, page))
				__pagevec_lru_add(lru_pvec);
			*cached_page = NULL;
		}
	}
	return page;
}
/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int remove_suid(struct dentry *dentry)
{
	mode_t mode = dentry->d_inode->i_mode;
	int kill = 0;
	int result = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && !capable(CAP_FSETID))) {
		struct iattr newattrs;

		newattrs.ia_valid = ATTR_FORCE | kill;
		result = notify_change(dentry, &newattrs);
	}
	return result;
}
EXPORT_SYMBOL(remove_suid);
size_t
__filemap_copy_from_user_iovec(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left)) {
			/* zero the rest of the target like __copy_from_user */
			if (bytes)
				memset(vaddr, 0, bytes);
			break;
		}
	}
	return copied - left;
}
/*
 * Performs necessary checks before doing a write
 *
 * Can adjust writing position or amount of bytes to write.
 * Returns appropriate error code that caller should return or
 * zero in case that write should be allowed.
 */
inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
{
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (unlikely(*pos < 0))
		return -EINVAL;

	if (!isblk) {
		/* FIXME: this is for backwards compatibility with 2.4 */
		if (file->f_flags & O_APPEND)
			*pos = i_size_read(inode);

		if (limit != RLIM_INFINITY) {
			if (*pos >= limit) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			if (*count > limit - (typeof(limit))*pos) {
				*count = limit - (typeof(limit))*pos;
			}
		}
	}

	/*
	 * LFS rule
	 */
	if (unlikely(*pos + *count > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (*pos >= MAX_NON_LFS) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
			*count = MAX_NON_LFS - (unsigned long)*pos;
		}
	}

	/*
	 * Are we about to exceed the fs block limit ?
	 *
	 * If we have written data it becomes a short write.  If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus frestrict idea will clean these up nicely..
	 */
	if (likely(!isblk)) {
		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
			if (*count || *pos > inode->i_sb->s_maxbytes) {
				send_sig(SIGXFSZ, current, 0);
				return -EFBIG;
			}
			/* zero-length writes at ->s_maxbytes are OK */
		}

		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
			*count = inode->i_sb->s_maxbytes - *pos;
	} else {
		loff_t isize;
		if (bdev_read_only(I_BDEV(inode)))
			return -EPERM;
		isize = i_size_read(inode);
		if (*pos >= isize) {
			if (*count || *pos > isize)
				return -ENOSPC;
		}

		if (*pos + *count > isize)
			*count = isize - *pos;
	}
	return 0;
}
EXPORT_SYMBOL(generic_write_checks);
ssize_t
generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
		size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written;

	if (count != ocount)
		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	if (written > 0) {
		loff_t end = pos + written;
		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, end);
			mark_inode_dirty(inode);
		}
		*ppos = end;
	}

	/*
	 * Sync the fs metadata but not the minor inode changes and
	 * of course not the data as we did direct DMA for the IO.
	 * i_mutex is held, which protects generic_osync_inode() from
	 * livelocking.
	 */
	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		if (err < 0)
			written = err;
	}
	if (written == count && !is_sync_kiocb(iocb))
		written = -EIOCBQUEUED;
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
ssize_t
generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos, loff_t *ppos,
		size_t count, ssize_t written)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct address_space_operations *a_ops = mapping->a_ops;
	struct inode *inode = mapping->host;
	long status = 0;
	struct page *page;
	struct page *cached_page = NULL;
	size_t bytes;
	struct pagevec lru_pvec;
	const struct iovec *cur_iov = iov; /* current iovec */
	size_t iov_base = 0;		   /* offset in the current iovec */
	char __user *buf;

	pagevec_init(&lru_pvec, 0);

	/*
	 * handle partial DIO write.  Adjust cur_iov if needed.
	 */
	if (likely(nr_segs == 1))
		buf = iov->iov_base + written;
	else {
		filemap_set_next_iovec(&cur_iov, &iov_base, written);
		buf = cur_iov->iov_base + iov_base;
	}
	do {
		unsigned long index;
		unsigned long offset;
		unsigned long maxlen;
		size_t copied;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		maxlen = cur_iov->iov_len - iov_base;
		if (maxlen > bytes)
			maxlen = bytes;
		fault_in_pages_readable(buf, maxlen);

		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
		if (!page) {
			status = -ENOMEM;
			break;
		}

		status = a_ops->prepare_write(file, page, offset, offset+bytes);
		if (unlikely(status)) {
			loff_t isize = i_size_read(inode);

			if (status != AOP_TRUNCATED_PAGE)
				unlock_page(page);
			page_cache_release(page);
			if (status == AOP_TRUNCATED_PAGE)
				continue;
			/*
			 * prepare_write() may have instantiated a few blocks
			 * outside i_size.  Trim these off again.
			 */
			if (pos + bytes > isize)
				vmtruncate(inode, isize);
			break;
		}
		if (likely(nr_segs == 1))
			copied = filemap_copy_from_user(page, offset,
							buf, bytes);
		else
			copied = filemap_copy_from_user_iovec(page, offset,
						cur_iov, iov_base, bytes);
		flush_dcache_page(page);
		status = a_ops->commit_write(file, page, offset, offset+bytes);
		if (status == AOP_TRUNCATED_PAGE) {
			page_cache_release(page);
			continue;
		}
		if (likely(copied > 0)) {
			if (!status)
				status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
				if (unlikely(nr_segs > 1)) {
					filemap_set_next_iovec(&cur_iov,
							&iov_base, status);
					if (count)
						buf = cur_iov->iov_base +
							iov_base;
				} else {
					iov_base += status;
				}
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		if (status < 0)
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
	*ppos = pos;

	if (cached_page)
		page_cache_release(cached_page);

	/*
	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
	 */
	if (likely(status >= 0)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
			if (!a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(inode, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}

	/*
	 * If we get here for O_DIRECT writes then we must have fallen through
	 * to buffered writes (block instantiation inside i_size).  So we sync
	 * the file data here, to try to honour O_DIRECT expectations.
	 */
	if (unlikely(file->f_flags & O_DIRECT) && written)
		status = filemap_write_and_wait(mapping);

	pagevec_lru_add(&lru_pvec);
	return written ? written : status;
}
EXPORT_SYMBOL(generic_file_buffered_write);
static ssize_t
__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t ocount;		/* original count */
	size_t count;		/* after file limit checks */
	struct inode *inode = mapping->host;
	unsigned long seg;
	loff_t pos;
	ssize_t written;
	ssize_t err;

	ocount = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *ppos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	file_update_time(file);

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (unlikely(file->f_flags & O_DIRECT)) {
		written = generic_file_direct_write(iocb, iov,
				&nr_segs, pos, ppos, count, ocount);
		if (written < 0 || written == count)
			goto out;
		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		pos += written;
		count -= written;
	}

	written = generic_file_buffered_write(iocb, iov, nr_segs,
			pos, ppos, count, written);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(generic_file_aio_write_nolock);
ssize_t
generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	loff_t pos = *ppos;

	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range_nolock(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
static ssize_t
__generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

ssize_t
generic_file_write_nolock(struct file *file, const struct iovec *iov,
				unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_nolock);
ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
			       size_t count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
						&iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_aio_write);
ssize_t generic_file_write(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;
	struct iovec local_iov = { .iov_base = (void __user *)buf,
					.iov_len = count };

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		ssize_t err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_write);
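
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * relies on the generic pagecache paths typically just points its
 * file_operations at the helpers defined above, roughly like this
 * (example_fops is hypothetical):
 */
static const struct file_operations example_fops = {
	.read		= generic_file_read,
	.write		= generic_file_write,
	.aio_read	= generic_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.sendfile	= generic_file_sendfile,
};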
ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}
EXPORT_SYMBOL(generic_file_readv);

ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
			unsigned long nr_segs, loff_t *ppos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);
	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
	mutex_unlock(&inode->i_mutex);

	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err;

		err = sync_page_range(inode, mapping, *ppos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
EXPORT_SYMBOL(generic_file_writev);
/*
 * Called under i_mutex for writes to S_ISREG files.   Returns -EIO if something
 * went wrong during pagecache shootdown.
 */
static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	ssize_t retval;
	size_t write_len = 0;

	/*
	 * If it's a write, unmap all mmappings of the file up-front.  This
	 * will cause any pte dirty bits to be propagated into the pageframes
	 * for the subsequent filemap_write_and_wait().
	 */
	if (rw == WRITE) {
		write_len = iov_length(iov, nr_segs);
		if (mapping_mapped(mapping))
			unmap_mapping_range(mapping, offset, write_len, 0);
	}

	retval = filemap_write_and_wait(mapping);
	if (retval == 0) {
		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
						offset, nr_segs);
		if (rw == WRITE && mapping->nrpages) {
			pgoff_t end = (offset + write_len - 1)
						>> PAGE_CACHE_SHIFT;
			int err = invalidate_inode_pages2_range(mapping,
					offset >> PAGE_CACHE_SHIFT, end);
			if (err)
				retval = err;
		}
	}
	return retval;
}