/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE - partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
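
/*
 * Worked example (illustrative only, not called from this file): for a
 * truncate to 1000 bytes with 4096-byte pages, partial = 1000, so bytes
 * 1000..4095 of the final remaining page are zeroed:
 *
 *	truncate_partial_page(page, 1000);
 *	  => memclear_highpage_flush(page, 1000, 4096 - 1000);
 */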

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too.  So we re-check the dirtiness inside
 * ->tree_lock.  That provides exclusion against the __set_page_dirty
 * functions.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;
	if (page_count(page) != 2)	/* caller's ref + pagecache ref */
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start and
 * end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
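
/*
 * Usage sketch (illustrative only): remove the pagecache for the first
 * megabyte of a file.  Note that lend must address the last byte of a
 * page, which the BUG_ON above enforces:
 *
 *	truncate_inode_pages_range(inode->i_mapping, 0,
 *				   (loff_t)(1 << 20) - 1);
 */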

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
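
/*
 * Caller sketch (illustrative, simplified from the VM's truncate path):
 * a file is shrunk by updating i_size first and then discarding the pages
 * that now lie beyond it:
 *
 *	inode->i_size = offset;
 *	...
 *	truncate_inode_pages(inode->i_mapping, offset);
 */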

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
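
/*
 * Usage sketch (illustrative; offset and len are hypothetical byte-range
 * variables): a POSIX_FADV_DONTNEED-style path can drop clean cached pages
 * for a byte range by converting it to page offsets:
 *
 *	invalidate_mapping_pages(mapping,
 *				 offset >> PAGE_CACHE_SHIFT,
 *				 (offset + len - 1) >> PAGE_CACHE_SHIFT);
 */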

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page2(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
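
/*
 * Usage sketch (illustrative; pos and len are hypothetical variables for a
 * just-completed direct-IO write): the writer invalidates the pagecache
 * covering the byte range it bypassed, so subsequent buffered reads go
 * back to disk rather than returning stale cached data:
 *
 *	invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + len - 1) >> PAGE_CACHE_SHIFT);
 */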

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
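
/*
 * Caller sketch (illustrative): network filesystems such as NFS use this
 * when the server indicates the file has changed, so stale cached data is
 * never handed back to userspace:
 *
 *	if (attributes_changed(inode))
 *		invalidate_inode_pages2(inode->i_mapping);
 *
 * attributes_changed() is a hypothetical helper here, standing in for the
 * filesystem's own cache-validity check.
 */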