/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   block_invalidatepage */
/*
 * Call the mapping's ->invalidatepage(), falling back to
 * block_invalidatepage() if the filesystem does not supply one.
 */
static int do_invalidatepage(struct page *page, unsigned long offset)
{
	int (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
	if (invalidatepage == NULL)
		invalidatepage = block_invalidatepage;
	return (*invalidatepage)(page, offset);
}
/* Zero the tail of a partially-truncated page and drop fs metadata. */
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}
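
/*
 * For illustration (not part of the original source): race (b) above as
 * a timeline.  Two tasks find the same page; whichever takes the page
 * lock second must notice that the mapping changed underneath it:
 *
 *	Task A					Task B
 *	lock_page(page);			lock_page(page);  <- blocks
 *	truncate_complete_page(mapping, page);
 *	  remove_from_page_cache(page);
 *	unlock_page(page);			<- B acquires the lock
 *						truncate_complete_page();
 *						  page->mapping != mapping,
 *						  so B bales out harmlessly
 */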
/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->page_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	spin_lock(&mapping->page_lock);
	if (PageDirty(page)) {
		spin_unlock(&mapping->page_lock);
		return 0;
	}
	__remove_from_page_cache(page);
	spin_unlock(&mapping->page_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
}
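
#if 0	/* illustrative counter-example -- not built, not in the original */
/*
 * For contrast, a sketch of what the comment above warns against:
 * testing PageDirty() outside mapping->page_lock leaves a window in
 * which a __set_page_dirty function can redirty the page after the
 * test, so the removal below could throw away dirty data.
 */
static int racy_invalidate_page(struct address_space *mapping,
				struct page *page)
{
	if (PageDirty(page))			/* unlocked test... */
		return 0;
	spin_lock(&mapping->page_lock);
	__remove_from_page_cache(page);		/* ...page may be dirty now */
	spin_unlock(&mapping->page_lock);
	page_cache_release(page);
	return 1;
}
#endif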
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Truncate the page cache at a set offset, removing the pages that are beyond
 * that offset (and zeroing out partial pages).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Called under (and serialised by) inode->i_sem.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	pagevec_init(&pvec, 0);
	next = start;
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
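
#if 0	/* illustrative sketch only -- not built, not in the original */
/*
 * A hypothetical filesystem truncate path ("example_truncate" is not a
 * real kernel symbol).  Per the comment above, the caller must hold
 * inode->i_sem; i_size is updated before the pagecache is truncated so
 * that concurrent readers cannot re-instantiate the truncated pages.
 */
static void example_truncate(struct inode *inode, loff_t newsize)
{
	inode->i_size = newsize;
	truncate_inode_pages(inode->i_mapping, newsize);
}
#endif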
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (TestSetPageLocked(page)) {
				next++;
				continue;
			}
			if (page->index > next)
				next = page->index;
			next++;
			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
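
#if 0	/* illustrative sketch only -- not built, not in the original */
/*
 * Hypothetical helper ("example_drop_cache" is not a real kernel
 * symbol): a best-effort attempt to drop an inode's clean, unmapped
 * pagecache.  Dirty, locked, mapped and in-flight pages are skipped,
 * so the count returned may be less than mapping->nrpages.
 */
static unsigned long example_drop_cache(struct inode *inode)
{
	return invalidate_inode_pages(inode->i_mapping);
}
#endif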
/**
 * invalidate_inode_pages2 - remove all unmapped pages from an address_space
 * @mapping: the address_space
 *
 * invalidate_inode_pages2() is like truncate_inode_pages(), except for the case
 * where the page is seen to be mapped into process pagetables.  In that case,
 * the page is marked clean but is left attached to its address_space.
 *
 * FIXME: invalidate_inode_pages2() is probably trivially livelockable.
 */
void invalidate_inode_pages2(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->mapping == mapping) {	/* truncate race? */
				wait_on_page_writeback(page);
				next = page->index + 1;
				if (page_mapped(page))
					clear_page_dirty(page);
				else
					invalidate_complete_page(mapping, page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
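
#if 0	/* illustrative sketch only -- not built, not in the original */
/*
 * Hypothetical caller ("example_forget_cached_data" is not a real
 * kernel symbol): a network filesystem that has detected its cached
 * data is stale.  Unmapped pages are dropped; mapped pages are merely
 * marked clean and stay attached to the mapping, as described above.
 */
static void example_forget_cached_data(struct inode *inode)
{
	invalidate_inode_pages2(inode->i_mapping);
}
#endif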