/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first, and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);
	ClearPageUptodate(page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);