/*
 * page.c - buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>,
 *            Seiji Kihara <kihara@osrg.net>.
 */
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"
#define NILFS_BUFFER_INHERENT_BITS  \
	((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
	 (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Allocated))
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}
/*
 * Since the page cache of B-tree node pages or data page cache of pseudo
 * inodes does not have a valid mapping->host pointer, calling
 * mark_buffer_dirty() for their buffers causes a NULL pointer dereference;
 * it calls __mark_inode_dirty(NULL) through __set_page_dirty().
 * To avoid this problem, the old style mark_buffer_dirty() is used instead.
 */
void nilfs_mark_buffer_dirty(struct buffer_head *bh)
{
	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
		__set_page_dirty_nobuffers(bh->b_page);
}
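
#if 0	/* illustrative sketch, not part of the original file */
/*
 * A minimal caller pattern for nilfs_mark_buffer_dirty(): after
 * modifying a buffer of a B-tree node page or a pseudo inode (whose
 * mapping->host is invalid), mark it dirty through this helper rather
 * than mark_buffer_dirty().  nilfs_example_modify_block() is
 * hypothetical and stands in for whatever code changed the buffer.
 */
static void nilfs_example_dirty_block(struct buffer_head *bh)
{
	nilfs_example_modify_block(bh);	/* hypothetical modification */
	set_buffer_uptodate(bh);
	nilfs_mark_buffer_dirty(bh);	/* avoids __mark_inode_dirty(NULL) */
}
#endif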
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits);
	struct page *page, *opage;
	struct buffer_head *bh, *obh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		page_cache_release(page);
		return NULL;
	}
	if (!buffer_uptodate(bh) && mapping->assoc_mapping != NULL) {
		/*
		 * Shadow page cache uses assoc_mapping to point its original
		 * page cache.  The following code tries the original cache
		 * if the given cache is a shadow and it didn't hit.
		 */
		opage = find_lock_page(mapping->assoc_mapping, index);
		if (!opage)
			return bh;

		obh = __nilfs_get_page_block(opage, blkoff, index, blkbits,
					     b_state);
		if (buffer_uptodate(obh)) {
			nilfs_copy_buffer(bh, obh);
			if (buffer_dirty(obh)) {
				nilfs_mark_buffer_dirty(bh);
				if (!buffer_nilfs_node(bh) && NILFS_MDT(inode))
					nilfs_mdt_mark_dirty(inode);
			}
		}
		brelse(obh);
		unlock_page(opage);
		page_cache_release(opage);
	}
	return bh;
}
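
#if 0	/* illustrative sketch, not part of the original file */
/*
 * Typical use of nilfs_grab_buffer(): it returns with the page locked
 * and referenced and the buffer head held, so the caller must drop all
 * three when done.  The read step is elided; this only shows the
 * reference discipline, not the exact NILFS read path.
 */
static int nilfs_example_get_block(struct inode *inode, unsigned long blkoff)
{
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;
	if (!buffer_uptodate(bh)) {
		/* a real caller would map and read in the block here */
	}
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
	return 0;
}
#endif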
/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	lock_buffer(bh);
	clear_buffer_nilfs_volatile(bh);
	if (test_clear_buffer_dirty(bh) && nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	clear_buffer_uptodate(bh);
	clear_buffer_mapped(bh);
	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}
/**
 * nilfs_copy_buffer -- copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage, KM_USER0);
	kaddr1 = kmap_atomic(dpage, KM_USER1);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1, KM_USER1);
	kunmap_atomic(kaddr0, KM_USER0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & (1UL << BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & (1UL << BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}
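
#if 0	/* illustrative sketch, not part of the original file */
/*
 * nilfs_copy_buffer() transfers data and only the bits in
 * NILFS_BUFFER_INHERENT_BITS, so BH_Dirty never carries over; a caller
 * that wants the copy dirty must re-mark it, as nilfs_grab_buffer()
 * does above.  A minimal sketch of that pattern:
 */
static void nilfs_example_copy_dirty(struct buffer_head *dbh,
				     struct buffer_head *sbh)
{
	nilfs_copy_buffer(dbh, sbh);
	if (buffer_dirty(sbh))
		nilfs_mark_buffer_dirty(dbh);	/* dirty state is not inherited */
}
#endif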
/**
 * nilfs_page_buffers_clean - check if a page has dirty buffers or not.
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino = 0;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	if (m) {
		struct inode *inode = NILFS_AS_I(m);
		if (inode != NULL)
			ino = inode->i_ino;
	}
	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx "
	       "mapping=%p ino=%lu\n",
	       page, atomic_read(&page->_count),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}
/**
 * nilfs_alloc_private_page - allocate a private page with buffer heads
 * @bdev: block device the buffers belong to
 * @size: buffer size in bytes
 * @state: initial b_state bits for each buffer head
 *
 * Return Value: On success, a pointer to the allocated page is returned.
 * On error, NULL is returned.
 */
struct page *nilfs_alloc_private_page(struct block_device *bdev, int size,
				      unsigned long state)
{
	struct buffer_head *bh, *head, *tail;
	struct page *page;

	page = alloc_page(GFP_NOFS); /* page_count of the returned page is 1 */
	if (unlikely(!page))
		return NULL;

	lock_page(page);
	head = alloc_page_buffers(page, size, 0);
	if (unlikely(!head)) {
		unlock_page(page);
		__free_page(page);
		return NULL;
	}

	bh = head;
	do {
		bh->b_state = (1UL << BH_NILFS_Allocated) | state;
		tail = bh;
		bh->b_bdev = bdev;
		bh = bh->b_this_page;
	} while (bh);

	tail->b_this_page = head;
	attach_page_buffers(page, head);

	return page;
}

void nilfs_free_private_page(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(page->mapping);

	if (page_has_buffers(page) && !try_to_free_buffers(page))
		NILFS_PAGE_BUG(page, "failed to free page");

	unlock_page(page);
	__free_page(page);
}
/**
 * nilfs_copy_page -- copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * should be treated by caller.  The page must not be under i/o.
 * Both src and dst page must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = sbufs = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= (1UL << BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	pagevec_init(&pvec, 0);
repeat:
	if (!pagevec_lookup_tag(&pvec, smap, &index, PAGECACHE_TAG_DIRTY,
				PAGEVEC_SIZE))
		return 0;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;

		lock_page(page);
		if (unlikely(!PageDirty(page)))
			NILFS_PAGE_BUG(page, "inconsistent dirty state");

		dpage = grab_cache_page(dmap, page->index);
		if (unlikely(!dpage)) {
			/* No empty page is added to the page cache */
			err = -ENOMEM;
			unlock_page(page);
			break;
		}
		if (unlikely(!page_has_buffers(page)))
			NILFS_PAGE_BUG(page,
				       "found empty page in dat page cache");

		nilfs_copy_page(dpage, page, 1);
		__set_page_dirty_nobuffers(dpage);

		unlock_page(dpage);
		page_cache_release(dpage);
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}
/**
 * nilfs_copy_back_pages -- copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct pagevec pvec;
	unsigned int i, n;
	pgoff_t index = 0;
	int err;

	pagevec_init(&pvec, 0);
repeat:
	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
	if (!n)
		return;
	index = pvec.pages[n - 1]->index + 1;

	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i], *dpage;
		pgoff_t offset = page->index;

		lock_page(page);
		dpage = find_lock_page(dmap, offset);
		if (dpage) {
			/* override existing page on the destination cache */
			WARN_ON(PageDirty(dpage));
			nilfs_copy_page(dpage, page, 0);
			unlock_page(dpage);
			page_cache_release(dpage);
		} else {
			struct page *page2;

			/* move the page to the destination cache */
			spin_lock_irq(&smap->tree_lock);
			page2 = radix_tree_delete(&smap->page_tree, offset);
			WARN_ON(page2 != page);

			smap->nrpages--;
			spin_unlock_irq(&smap->tree_lock);

			spin_lock_irq(&dmap->tree_lock);
			err = radix_tree_insert(&dmap->page_tree, offset, page);
			if (unlikely(err < 0)) {
				WARN_ON(err == -EEXIST);
				page->mapping = NULL;
				page_cache_release(page); /* for cache */
			} else {
				page->mapping = dmap;
				dmap->nrpages++;
				if (PageDirty(page))
					radix_tree_tag_set(&dmap->page_tree,
							   offset,
							   PAGECACHE_TAG_DIRTY);
			}
			spin_unlock_irq(&dmap->tree_lock);
		}
		unlock_page(page);
	}
	pagevec_release(&pvec);
	cond_resched();

	goto repeat;
}
void nilfs_clear_dirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	unsigned int i;
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);

	while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY,
				  PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			ClearPageUptodate(page);
			ClearPageMappedToDisk(page);
			bh = head = page_buffers(page);
			do {
				lock_buffer(bh);
				clear_buffer_dirty(bh);
				clear_buffer_nilfs_volatile(bh);
				clear_buffer_uptodate(bh);
				clear_buffer_mapped(bh);
				unlock_buffer(bh);
				bh = bh->b_this_page;
			} while (bh != head);

			__nilfs_clear_page_dirty(page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
unsigned nilfs_page_count_clean_buffers(struct page *page,
					unsigned from, unsigned to)
{
	unsigned block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}
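
#if 0	/* illustrative sketch, not part of the original file */
/*
 * Sketch of a write_end-style caller of
 * nilfs_page_count_clean_buffers(): the still-clean buffers touched by
 * the range [from, to) are those about to be dirtied, so their count
 * can drive block accounting.  nilfs_example_account_blocks() is
 * hypothetical.
 */
static void nilfs_example_end_write(struct page *page,
				    unsigned from, unsigned to)
{
	unsigned nr_dirty = nilfs_page_count_clean_buffers(page, from, to);

	if (nr_dirty)
		nilfs_example_account_blocks(page->mapping->host, nr_dirty);
}
#endif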
/*
 * NILFS2 needs clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
 *    page dirty flags when it copies back pages from the shadow cache
 *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
 *    (dat->{i_mapping,i_btnode_cache}).
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		spin_lock_irq(&mapping->tree_lock);
		if (test_bit(PG_dirty, &page->flags)) {
			radix_tree_tag_clear(&mapping->page_tree,
					     page_index(page),
					     PAGECACHE_TAG_DIRTY);
			spin_unlock_irq(&mapping->tree_lock);
			return clear_page_dirty_for_io(page);
		}
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	return TestClearPageDirty(page);
}
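
#if 0	/* illustrative sketch, not part of the original file */
/*
 * Case 2) above in miniature: when an operation discards a dirty
 * buffer, the page-level dirty flag must be cancelled once no dirty
 * buffer remains on the page.  This mirrors the sequence used by
 * nilfs_forget_buffer() earlier in this file.
 */
static void nilfs_example_cancel_dirty(struct buffer_head *bh)
{
	struct page *page = bh->b_page;

	if (test_clear_buffer_dirty(bh) && nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);
}
#endif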