/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
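/*
 * Illustrative only (not part of this file): a filesystem whose pages
 * carry buffer_heads typically points ->invalidatepage at
 * block_invalidatepage, which is also the fallback do_invalidatepage()
 * selects under CONFIG_BLOCK when the method is NULL:
 *
 *	static const struct address_space_operations ex_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * (ex_aops is a made-up name; a real aops table sets many more methods.)
 */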
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
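/*
 * Note on truncate_partial_page() above: memclear_highpage_flush()
 * zeroes the page from @partial through the end of the page (taking
 * care of the kernel mapping on highmem configurations), so anyone who
 * still has the page mapped sees zeroes beyond the new EOF rather than
 * stale data.
 */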
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	/* If we're cancelling the page, it had better not be mapped any more */
	if (page_mapped(page)) {
		static unsigned int warncount;

		WARN_ON(++warncount < 5);
	}

	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}
/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}
/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
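/*
 * Illustrative only (not part of this file): @lend must name the last
 * byte of a page, as the BUG_ON in the function enforces.  A caller
 * punching out a whole-page range of length len at page-aligned lstart
 * (roughly what tmpfs does for madvise(MADV_REMOVE)) might use:
 *
 *	truncate_inode_pages_range(inode->i_mapping, lstart,
 *				   lstart + len - 1);
 */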
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
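/*
 * Illustrative only (not part of this file): the common idiom for
 * discarding an inode's entire page cache, e.g. from a filesystem's
 * delete_inode path, is:
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 *
 * Passing (loff_t)-1 as @lend above works because -1 has all bits set,
 * so it satisfies the last-byte-of-a-page rule for any page size.
 */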
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes only the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}
unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
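/*
 * Illustrative only (not part of this file): invalidate_mapping_pages()
 * is best-effort, which suits "drop what you safely can" callers:
 *
 *	unsigned long dropped;
 *
 *	dropped = invalidate_mapping_pages(mapping, 0, ~0UL);
 *
 * Dirty, locked, writeback and mapped pages are simply skipped; when
 * stale pages must not survive, use invalidate_inode_pages2() below,
 * which reports failure instead.
 */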
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
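/*
 * Note on do_launder_page() above: ->launder_page is invoked with the
 * page locked and still dirty, and is expected to write the page back,
 * returning 0 on success.  If the mapping has no ->launder_page, 0 is
 * returned but the page stays dirty, so invalidate_complete_page2()
 * will refuse it and the caller ends up returning -EIO.
 */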
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index<<PAGE_CACHE_SHIFT,
					    (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index<<PAGE_CACHE_SHIFT,
					    PAGE_CACHE_SIZE, 0);
				}
			}
			ret = do_launder_page(mapping, page);
			if (ret == 0 && !invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	WARN_ON_ONCE(ret);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
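/*
 * Illustrative only (not part of this file): direct I/O is the classic
 * user of the range variant.  After writing bytes [offset, offset+len)
 * straight to disk, a caller can shoot down any page cache covering the
 * range so later buffered reads cannot return stale data:
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			offset >> PAGE_CACHE_SHIFT,
 *			(offset + len - 1) >> PAGE_CACHE_SHIFT);
 *
 * A -EIO return means some page (e.g. one redirtied concurrently) could
 * not be invalidated.
 */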
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
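/*
 * Illustrative only (not part of this file): network filesystems reach
 * for invalidate_inode_pages2() when the server indicates their cached
 * copy of a file is stale, e.g. during NFS revalidation:
 *
 *	err = invalidate_inode_pages2(inode->i_mapping);
 *	if (err)
 *		goto out_stale;		(stale pages are still pinned)
 *
 * (out_stale is a made-up label.)  Unlike invalidate_inode_pages(), a
 * page that cannot be dropped is reported via -EIO rather than silently
 * skipped.
 */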