/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "trans.h"
#include "util.h"
#include "ops_address.h"
static int aspace_get_block(struct inode *inode, sector_t lblock,
			    struct buffer_head *bh_result, int create)
{
	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
	return -EOPNOTSUPP;
}
static int gfs2_aspace_writepage(struct page *page,
				 struct writeback_control *wbc)
{
	return block_write_full_page(page, aspace_get_block, wbc);
}
static const struct address_space_operations aspace_aops = {
	.writepage = gfs2_aspace_writepage,
	.releasepage = gfs2_releasepage,
};
/**
 * gfs2_aspace_get - Create and initialize a struct inode structure
 * @sdp: the filesystem the aspace is in
 *
 * Right now a struct inode is just a struct inode.  Maybe Linux
 * will supply a more lightweight address space construct (that works)
 * in the future.
 *
 * Make sure pages/buffers in this aspace aren't in high memory.
 *
 * Returns: the aspace
 */
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
	struct inode *aspace;

	aspace = new_inode(sdp->sd_vfs);
	if (aspace) {
		mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
		aspace->i_mapping->a_ops = &aspace_aops;
		aspace->i_size = ~0ULL;
		aspace->i_private = NULL;
		insert_inode_hash(aspace);
	}
	return aspace;
}
void gfs2_aspace_put(struct inode *aspace)
{
	remove_inode_hash(aspace);
	iput(aspace);
}
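
/*
 * Illustrative sketch: how a glock might be given its own metadata address
 * space and release it again.  The setup/teardown context is assumed; only
 * the pairing of gfs2_aspace_get() with gfs2_aspace_put() is the point.
 */
#if 0
static int example_setup_aspace(struct gfs2_sbd *sdp, struct gfs2_glock *gl)
{
	gl->gl_aspace = gfs2_aspace_get(sdp);
	if (!gl->gl_aspace)
		return -ENOMEM;
	return 0;
}

static void example_teardown_aspace(struct gfs2_glock *gl)
{
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
}
#endif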
/**
 * gfs2_meta_inval - Invalidate all buffers associated with a glock
 * @gl: the glock
 *
 */
void gfs2_meta_inval(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;
	struct address_space *mapping = gl->gl_aspace->i_mapping;

	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));

	atomic_inc(&aspace->i_writecount);
	truncate_inode_pages(mapping, 0);
	atomic_dec(&aspace->i_writecount);

	gfs2_assert_withdraw(sdp, !mapping->nrpages);
}
/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: the glock
 *
 */
void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	int error;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_sbd);
}
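
/*
 * Illustrative sketch: a plausible ordering when a glock's metadata is
 * being released, writing dirty buffers back before dropping the pages.
 * The real callers live in the glock operation callbacks; this ordering is
 * an assumption used only to show how the two helpers relate.
 */
#if 0
static void example_release_meta(struct gfs2_glock *gl)
{
	gfs2_meta_sync(gl);	/* flush dirty metadata buffers to disk */
	gfs2_meta_inval(gl);	/* then drop all cached pages/buffers */
}
#endif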
/**
 * getbuf - Get a buffer with a given address space
 * @gl: the glock
 * @blkno: the block number (filesystem scope)
 * @create: 1 if the buffer should be created
 *
 * Returns: the buffer
 */
static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
	struct address_space *mapping = gl->gl_aspace->i_mapping;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct page *page;
	struct buffer_head *bh;
	unsigned int shift;
	unsigned long index;
	unsigned int bufnum;

	shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift;
	index = blkno >> shift;             /* convert block to page */
	bufnum = blkno - (index << shift);  /* block buf index within page */

	if (create) {
		for (;;) {
			page = grab_cache_page(mapping, index);
			if (page)
				break;
			yield();
		}
	} else {
		page = find_lock_page(mapping, index);
		if (!page)
			return NULL;
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, sdp->sd_sb.sb_bsize, 0);

	/* Locate header for our buffer within our page */
	for (bh = page_buffers(page); bufnum--; bh = bh->b_this_page)
		/* Do nothing */;
	get_bh(bh);

	if (!buffer_mapped(bh))
		map_bh(bh, sdp->sd_vfs, blkno);

	unlock_page(page);
	mark_page_accessed(page);
	page_cache_release(page);

	return bh;
}
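
/*
 * Worked example of the shift/index/bufnum arithmetic at the top of
 * getbuf(), assuming 4096-byte pages (PAGE_CACHE_SHIFT == 12) and a
 * 1024-byte filesystem block (sb_bsize_shift == 10):
 *
 *	shift  = 12 - 10 = 2		(4 blocks per page)
 *	blkno  = 27
 *	index  = 27 >> 2 = 6		(page 6 in the address space)
 *	bufnum = 27 - (6 << 2) = 3	(4th buffer within that page)
 */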
static void meta_prep_new(struct buffer_head *bh)
{
	struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
}
/**
 * gfs2_meta_new - Get a block
 * @gl: The glock associated with this block
 * @blkno: The block number
 *
 * Returns: The buffer
 */
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	bh = getbuf(gl, blkno, CREATE);
	meta_prep_new(bh);
	return bh;
}
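
/*
 * Illustrative sketch: how an allocating caller might use gfs2_meta_new().
 * The new buffer is already uptodate, so it is typically added to the
 * current transaction and stamped with a metadata type.  The call sequence
 * and block number here are assumptions, not taken from this file.
 */
#if 0
static struct buffer_head *example_new_meta_block(struct gfs2_inode *ip,
						  u64 blkno)
{
	struct buffer_head *bh;

	bh = gfs2_meta_new(ip->i_gl, blkno);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
	return bh;
}
#endif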
/**
 * gfs2_meta_read - Read a block from disk
 * @gl: The glock covering the block
 * @blkno: The block number
 * @flags: flags
 * @bhp: the place where the buffer is returned (NULL on failure)
 *
 * Returns: errno
 */
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
		   struct buffer_head **bhp)
{
	*bhp = getbuf(gl, blkno, CREATE);
	if (!buffer_uptodate(*bhp))
		ll_rw_block(READ_META, 1, bhp);
	if (flags & DIO_WAIT) {
		int error = gfs2_meta_wait(gl->gl_sbd, *bhp);
		if (error) {
			brelse(*bhp);
			return error;
		}
	}

	return 0;
}
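
/*
 * Illustrative sketch: a synchronous metadata read.  DIO_WAIT makes the
 * call block until the buffer is uptodate; the caller owns the returned
 * reference and must brelse() it.  The surrounding error handling is an
 * assumption.
 */
#if 0
static int example_read_meta(struct gfs2_glock *gl, u64 blkno)
{
	struct buffer_head *bh;
	int error;

	error = gfs2_meta_read(gl, blkno, DIO_WAIT, &bh);
	if (error)
		return error;

	/* ... examine bh->b_data ... */

	brelse(bh);
	return 0;
}
#endif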
/**
 * gfs2_meta_wait - Wait for a metadata read to complete
 * @sdp: the filesystem
 * @bh: The block to wait for
 *
 * Returns: errno
 */
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	wait_on_buffer(bh);

	if (!buffer_uptodate(bh)) {
		struct gfs2_trans *tr = current->journal_info;
		if (tr && tr->tr_touched)
			gfs2_io_error_bh(sdp, bh);
		return -EIO;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return 0;
}
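
/*
 * Illustrative sketch: submitting two reads without DIO_WAIT and waiting
 * for both afterwards.  Whether any particular caller batches reads this
 * way is an assumption; the point is that gfs2_meta_read() without
 * DIO_WAIT pairs with a later gfs2_meta_wait().
 */
#if 0
static int example_read_two(struct gfs2_glock *gl, u64 a, u64 b)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *bh_a, *bh_b;
	int error;

	gfs2_meta_read(gl, a, 0, &bh_a);	/* submit, don't wait */
	gfs2_meta_read(gl, b, 0, &bh_b);

	error = gfs2_meta_wait(sdp, bh_a);
	if (!error)
		error = gfs2_meta_wait(sdp, bh_b);

	brelse(bh_a);
	brelse(bh_b);
	return error;
}
#endif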
/**
 * gfs2_attach_bufdata - attach a struct gfs2_bufdata structure to a buffer
 * @gl: the glock the buffer belongs to
 * @bh: The buffer to be attached to
 * @meta: Flag to indicate whether it's metadata or not
 *
 */
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
			 int meta)
{
	struct gfs2_bufdata *bd;

	if (meta)
		lock_page(bh->b_page);

	if (bh->b_private) {
		if (meta)
			unlock_page(bh->b_page);
		return;
	}

	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
	bd->bd_bh = bh;
	bd->bd_gl = gl;

	INIT_LIST_HEAD(&bd->bd_list_tr);
	if (meta)
		lops_init_le(&bd->bd_le, &gfs2_buf_lops);
	else
		lops_init_le(&bd->bd_le, &gfs2_databuf_lops);
	bh->b_private = bd;

	if (meta)
		unlock_page(bh->b_page);
}
/**
 * gfs2_meta_wipe - make inode's buffers so they aren't dirty or pinned anymore
 * @ip: the inode who owns the buffers
 * @bstart: the first buffer in the run
 * @blen: the number of buffers in the run
 *
 */
void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh;

	while (blen) {
		bh = getbuf(ip->i_gl, bstart, NO_CREATE);
		if (bh) {
			struct gfs2_bufdata *bd = bh->b_private;

			if (test_clear_buffer_pinned(bh)) {
				struct gfs2_trans *tr = current->journal_info;
				struct gfs2_inode *bh_ip =
					GFS2_I(bh->b_page->mapping->host);
				gfs2_log_lock(sdp);
				list_del_init(&bd->bd_le.le_list);
				gfs2_assert_warn(sdp, sdp->sd_log_num_buf);
				sdp->sd_log_num_buf--;
				gfs2_log_unlock(sdp);
				if (bh_ip->i_inode.i_private != NULL)
					tr->tr_num_databuf_rm++;
				else
					tr->tr_num_buf_rm++;
				brelse(bh);
			}
			if (bd) {
				gfs2_log_lock(sdp);
				if (bd->bd_ail) {
					u64 blkno = bh->b_blocknr;
					bd->bd_ail = NULL;
					list_del(&bd->bd_ail_st_list);
					list_del(&bd->bd_ail_gl_list);
					atomic_dec(&bd->bd_gl->gl_ail_count);
					brelse(bh);
					gfs2_log_unlock(sdp);
					gfs2_trans_add_revoke(sdp, blkno);
				} else
					gfs2_log_unlock(sdp);
			}

			lock_buffer(bh);
			clear_buffer_dirty(bh);
			clear_buffer_uptodate(bh);
			unlock_buffer(bh);

			brelse(bh);
		}

		bstart++;
		blen--;
	}
}
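
/*
 * Illustrative sketch: wiping the cached buffers for a contiguous run of
 * metadata blocks before the blocks are returned to the allocator.  The
 * deallocation step itself is assumed and only hinted at in the comment.
 */
#if 0
static void example_dealloc_meta_run(struct gfs2_inode *ip, u64 bstart,
				     u32 blen)
{
	gfs2_meta_wipe(ip, bstart, blen);
	/* ... hand the run [bstart, bstart + blen) back to its rgrp ... */
}
#endif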
/**
 * gfs2_meta_cache_flush - get rid of any references on buffers for this inode
 * @ip: The GFS2 inode
 *
 * This releases buffers that are in the most-recently-used array of
 * blocks used for indirect block addressing for this inode.
 */
void gfs2_meta_cache_flush(struct gfs2_inode *ip)
{
	struct buffer_head **bh_slot;
	unsigned int x;

	spin_lock(&ip->i_spin);

	for (x = 0; x < GFS2_MAX_META_HEIGHT; x++) {
		bh_slot = &ip->i_cache[x];
		if (*bh_slot) {
			brelse(*bh_slot);
			*bh_slot = NULL;
		}
	}

	spin_unlock(&ip->i_spin);
}
/**
 * gfs2_meta_indirect_buffer - Get a metadata buffer
 * @ip: The GFS2 inode
 * @height: The level of this buf in the metadata (indir addr) tree (if any)
 * @num: The block number (device relative) of the buffer
 * @new: Non-zero if we may create a new buffer
 * @bhp: the buffer is returned here
 *
 * Try to use the gfs2_inode's MRU metadata tree cache.
 *
 * Returns: errno
 */
int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num,
			      int new, struct buffer_head **bhp)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_glock *gl = ip->i_gl;
	struct buffer_head *bh = NULL, **bh_slot = ip->i_cache + height;
	int in_cache = 0;

	spin_lock(&ip->i_spin);
	if (*bh_slot && (*bh_slot)->b_blocknr == num) {
		bh = *bh_slot;
		get_bh(bh);
		in_cache = 1;
	}
	spin_unlock(&ip->i_spin);

	if (!bh)
		bh = getbuf(gl, num, CREATE);

	if (new) {
		if (gfs2_assert_warn(sdp, height))
			goto err;
		meta_prep_new(bh);
		gfs2_trans_add_bh(ip->i_gl, bh, 1);
		gfs2_metatype_set(bh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
		gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
	} else {
		u32 mtype = height ? GFS2_METATYPE_IN : GFS2_METATYPE_DI;
		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ_META, 1, &bh);
			if (gfs2_meta_wait(sdp, bh))
				goto err;
		}
		if (gfs2_metatype_check(sdp, bh, mtype))
			goto err;
	}

	if (!in_cache) {
		spin_lock(&ip->i_spin);
		if (*bh_slot)
			brelse(*bh_slot);
		*bh_slot = bh;
		get_bh(bh);
		spin_unlock(&ip->i_spin);
	}

	*bhp = bh;
	return 0;
err:
	brelse(bh);
	return -EIO;
}
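
/*
 * Illustrative sketch: fetching an inode's own on-disk block through the
 * per-inode cache.  Height 0 with new == 0 expects a GFS2_METATYPE_DI
 * block; the block number argument and error handling are assumptions.
 */
#if 0
static int example_read_dinode(struct gfs2_inode *ip, u64 no_addr,
			       struct buffer_head **bhp)
{
	return gfs2_meta_indirect_buffer(ip, 0, no_addr, 0, bhp);
}
#endif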
/**
 * gfs2_meta_ra - start readahead on an extent of a file
 * @gl: the glock the blocks belong to
 * @dblock: the starting disk block
 * @extlen: the number of blocks in the extent
 *
 * returns: the first buffer in the extent
 */
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct buffer_head *first_bh, *bh;
	u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
		     sdp->sd_sb.sb_bsize_shift;

	BUG_ON(!extlen);

	if (max_ra < 1)
		max_ra = 1;
	if (extlen > max_ra)
		extlen = max_ra;

	first_bh = getbuf(gl, dblock, CREATE);

	if (buffer_uptodate(first_bh))
		goto out;
	if (!buffer_locked(first_bh))
		ll_rw_block(READ_META, 1, &first_bh);

	dblock++;
	extlen--;

	while (extlen) {
		bh = getbuf(gl, dblock, CREATE);

		if (!buffer_uptodate(bh) && !buffer_locked(bh))
			ll_rw_block(READA, 1, &bh);
		brelse(bh);
		dblock++;
		extlen--;
		if (!buffer_locked(first_bh) && buffer_uptodate(first_bh))
			goto out;
	}

	wait_on_buffer(first_bh);
out:
	return first_bh;
}
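
/*
 * Illustrative sketch: a caller that has already mapped a logical block to
 * a physical extent starts readahead across the extent and uses the first
 * buffer immediately.  The extent lookup is assumed to have happened
 * elsewhere.
 */
#if 0
static int example_readahead_extent(struct gfs2_glock *gl, u64 dblock,
				    u32 extlen)
{
	struct buffer_head *first_bh;

	first_bh = gfs2_meta_ra(gl, dblock, extlen);
	if (!buffer_uptodate(first_bh)) {
		brelse(first_bh);
		return -EIO;
	}

	/* ... use first_bh->b_data ... */

	brelse(first_bh);
	return 0;
}
#endif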