/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
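
/*
 * META_locked in mp->flag is the lock bit; mp->wait is the wait queue
 * for sleepers.  A waiter must drop the page lock while it sleeps
 * (see __lock_metapage() below) so the current holder can finish.
 */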

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			unlock_page(mp->page);
			io_schedule();
			lock_page(mp->page);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;
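
/*
 * When the metapage size (PSIZE) is smaller than the VM page size,
 * MPS_PER_PAGE metapages share one page and page_private() holds a
 * meta_anchor tracking them plus the page's in-flight I/O count.
 * Otherwise page_private() points directly at the single metapage.
 */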

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	if (!PagePrivate(page))
		return NULL;
	return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	if (PagePrivate(page))
		a = mp_anchor(page);
	else {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		set_page_private(page, (unsigned long)a);
		SetPagePrivate(page);
		kmap(page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	struct meta_anchor *a = mp_anchor(page);
	int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kunmap(page);
	}
}

static inline void inc_io(struct page *page)
{
	atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
	if (atomic_dec_and_test(&mp_anchor(page)->io_count))
		handler(page);
}

#else
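
/*
 * Trivial variants for the one-metapage-per-page case: page_private()
 * points straight at the metapage, and no I/O counting is needed
 * since the page is always read or written as a single unit.
 */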
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
	return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
	if (mp) {
		set_page_private(page, (unsigned long)mp);
		SetPagePrivate(page);
		kmap(page);
	}
	return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
	set_page_private(page, 0);
	ClearPagePrivate(page);
	kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static void init_once(void *foo)
{
	struct metapage *mp = (struct metapage *)foo;

	mp->lid = 0;
	mp->lsn = 0;
	mp->flag = 0;
	mp->data = NULL;
	mp->clsn = 0;
	mp->log = NULL;
	set_bit(META_free, &mp->flag);
	init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
	mp->flag = 0;
	set_bit(META_free, &mp->flag);

	mempool_free(mp, metapage_mempool);
}
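
/*
 * The mempool keeps METAPOOL_MIN_PAGES metapages preallocated so that
 * metadata I/O can make forward progress under memory pressure.
 */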
int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, init_once);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);
	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct page *page, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(page, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */
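
/*
 * metapage_get_blocks() maps a logical block to a physical block:
 * via xtLookup() for fileset inodes, or 1:1 for the block device's
 * direct inode (i_ino == 0).  *len is trimmed to the contiguous
 * extent; a return of zero means no mapping.
 */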
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
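
/*
 * I/O completion: each bio end_io handler drops the page's io_count
 * through dec_io(); the final completion runs last_read_complete()
 * or last_write_complete() to finish off the whole page.
 */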

static void last_read_complete(struct page *page)
{
	if (!PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
}

static void metapage_read_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_read_end_io: I/O error\n");
		SetPageError(page);
	}

	dec_io(page, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
	struct metapage *mp;
	unsigned int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio, int err)
{
	struct page *page = bio->bi_private;

	BUG_ON(!PagePrivate(page));

	if (! test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		SetPageError(page);
	}
	dec_io(page, last_write_complete);
	bio_put(bio);
}
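
/*
 * Write back every dirty metapage in the page, coalescing metapages
 * that are contiguous both in memory and on disk into a single bio
 * vector and submitting one bio per contiguous on-disk extent.
 * io_count is incremented before each submit_bio() so the page's
 * writeback cannot complete until all extents have been issued.
 */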
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = page->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;

	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);
	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				set_bit(META_io, &mp->flag);
				continue;
			}
			/* Not contiguous */
			if (bio_add_page(bio, page, bio_bytes, bio_offset) <
			    bio_bytes)
				goto add_failed;
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 * with this page
			 */
			inc_io(page);
			if (!bio->bi_size)
				goto dump_bio;
			submit_bio(WRITE, bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(page);
		xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			/* Need better error handling */
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			dec_io(page, last_write_complete);
			continue;
		}
		set_bit(META_io, &mp->flag);
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(GFP_NOFS, 1);
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = page;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
			goto add_failed;
		if (!bio->bi_size)
			goto dump_bio;

		submit_bio(WRITE, bio);
		nr_underway++;
	}
	if (redirty)
		redirty_page_for_writepage(wbc, page);

	unlock_page(page);

	if (nr_underway == 0)
		end_page_writeback(page);

	return 0;
add_failed:
	/* We should never reach here, since we're only adding one vec */
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	goto skip;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
skip:
	bio_put(bio);
	unlock_page(page);
	dec_io(page, last_write_complete);

	return -EIO;
}
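
/*
 * Read the page with one bio per contiguous on-disk extent, skipping
 * unmapped blocks.  If nothing is mapped, the page is unlocked
 * without any I/O and is left not up to date.
 */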
static int metapage_readpage(struct file *fp, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!PageLocked(page));
	page_start = (sector_t)page->index <<
		     (PAGE_CACHE_SHIFT - inode->i_blkbits);

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!PagePrivate(page))
				insert_metapage(page, NULL);
			inc_io(page);
			if (bio)
				submit_bio(READ, bio);

			bio = bio_alloc(GFP_NOFS, 1);
			bio->bi_bdev = inode->i_sb->s_bdev;
			bio->bi_sector = pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = page;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			if (bio_add_page(bio, page, len, offset) < len)
				goto add_failed;
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(READ, bio);
	else
		unlock_page(page);

	return 0;

add_failed:
	printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
	bio_put(bio);
	dec_io(page, last_read_complete);
	return -EIO;
}

static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct metapage *mp;
	int ret = 1;
	int offset;

	for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
		mp = page_to_mp(page, offset);

		if (!mp)
			continue;

		jfs_info("metapage_releasepage: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = 0;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(page, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
	BUG_ON(offset);

	BUG_ON(PageWriteback(page));

	metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.readpage	= metapage_readpage,
	.writepage	= metapage_writepage,
	.sync_page	= block_sync_page,
	.releasepage	= metapage_releasepage,
	.invalidatepage	= metapage_invalidatepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
};
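
/*
 * Look up (or, if 'new', create) the metapage covering 'lblock'.
 * 'absolute' selects the block device's direct mapping instead of the
 * inode's own.  Returns the metapage locked with its reference count
 * raised, or NULL on failure.
 */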
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct page *page;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_CACHE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_CACHE_SIZE)) {
		page = grab_cache_page(mapping, page_index);
		if (!page) {
			jfs_err("grab_cache_page failed!");
			return NULL;
		}
		SetPageUptodate(page);
	} else {
		page = read_mapping_page(mapping, page_index, NULL);
		if (IS_ERR(page) || !PageUptodate(page)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		lock_page(page);
	}

	mp = page_to_mp(page, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "__get_metapage: mp->logical_size != size");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "__get_metapage: using a "
					  "discarded metapage");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		mp->page = page;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = page_address(page) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(page, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	unlock_page(page);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	unlock_page(page);
	return NULL;
}
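
/*
 * The helpers below juggle the metapage reference count together with
 * the page lock: grab_metapage()/release_metapage() form the usual
 * pair, while hold_metapage()/put_metapage() serve callers that need
 * the page locked while they examine the metapage.
 */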
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	page_cache_get(mp->page);
	lock_page(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
}

void force_metapage(struct metapage *mp)
{
	struct page *page = mp->page;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	page_cache_get(page);
	lock_page(page);
	set_page_dirty(page);
	write_one_page(page, 1);
	clear_bit(META_forcewrite, &mp->flag);
	page_cache_release(page);
}

void hold_metapage(struct metapage *mp)
{
	lock_page(mp->page);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		unlock_page(mp->page);
		return;
	}
	page_cache_get(mp->page);
	mp->count++;
	lock_metapage(mp);
	unlock_page(mp->page);
	release_metapage(mp);
}

void release_metapage(struct metapage * mp)
{
	struct page *page = mp->page;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	BUG_ON(!page);

	lock_page(page);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		unlock_page(page);
		page_cache_release(page);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		set_page_dirty(page);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			write_one_page(page, 1);
			lock_page(page); /* write_one_page unlocks the page */
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(page, mp);

	unlock_page(page);
	page_cache_release(page);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	struct page *page;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
		if (!page)
			continue;
		for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
			mp = page_to_mp(page, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= jfs_mpstat_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif