/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"

/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int blocks;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd;
	struct buffer_head *bh;
	u64 blkno;
	int error;

	blocks = atomic_read(&gl->gl_ail_count);
	if (!blocks)
		return;

	error = gfs2_trans_begin(sdp, 0, blocks);
	if (gfs2_assert_withdraw(sdp, !error))
		return;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata,
				bd_ail_gl_list);
		bh = bd->bd_bh;
		blkno = bh->b_blocknr;
		gfs2_assert_withdraw(sdp, !buffer_busy(bh));

		bd->bd_ail = NULL;
		list_del(&bd->bd_ail_st_list);
		list_del(&bd->bd_ail_gl_list);
		atomic_dec(&gl->gl_ail_count);
		brelse(bh);
		gfs2_log_unlock(sdp);

		/* gfs2_trans_add_revoke() takes the log lock itself,
		   so drop it around the call and retake it. */
		gfs2_trans_add_revoke(sdp, blkno);

		gfs2_log_lock(sdp);
	}
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	gfs2_log_unlock(sdp);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
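
/*
 * Note on the revokes above: a revoke record tells journal replay to
 * ignore any earlier copies of that block in the log.  Since the AIL
 * buffers have already been written in place, this lets the lock be
 * released without risking replay of stale metadata after a crash.
 */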

/**
 * gfs2_pte_inval - Sync and invalidate all PTEs associated with a glock
 * @gl: the glock
 */

static void gfs2_pte_inval(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;
	struct inode *inode;

	ip = gl->gl_object;
	inode = &ip->i_inode;
	if (!ip || !S_ISREG(inode->i_mode))
		return;

	if (!test_bit(GIF_PAGED, &ip->i_flags))
		return;

	unmap_shared_mapping_range(inode->i_mapping, 0, 0);

	if (test_bit(GIF_SW_PAGED, &ip->i_flags))
		set_bit(GLF_DIRTY, &gl->gl_flags);

	clear_bit(GIF_SW_PAGED, &ip->i_flags);
}
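
/*
 * GIF_SW_PAGED is set by the fault path when a shared-writable mapping
 * makes a PTE writable, meaning pages may have been dirtied through
 * mmap behind the glock's back.  Marking the glock GLF_DIRTY above
 * forces a full sync when it is demoted.
 */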

/**
 * meta_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void meta_go_sync(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_EXCLUSIVE)
		return;

	if (test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) {
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		gfs2_ail_empty_gl(gl);
	}
}

/**
 * meta_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 */

static void meta_go_inval(struct gfs2_glock *gl, int flags)
{
	if (!(flags & DIO_METADATA))
		return;

	gfs2_meta_inval(gl);
	gl->gl_vn++;
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;

	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
		if (ip)
			filemap_fdatawrite(ip->i_inode.i_mapping);
		gfs2_log_flush(gl->gl_sbd, gl);
		gfs2_meta_sync(gl);
		if (ip) {
			struct address_space *mapping = ip->i_inode.i_mapping;
			int error = filemap_fdatawait(mapping);
			mapping_set_error(mapping, error);
		}
		clear_bit(GLF_DIRTY, &gl->gl_flags);
		gfs2_ail_empty_gl(gl);
	}
}
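
/*
 * The order above matters: data writeback is started first, the log is
 * flushed so journaled metadata is safe in the journal before
 * gfs2_meta_sync() writes it in place, and only then do we wait for
 * the data I/O and clear GLF_DIRTY.
 */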

/**
 * inode_go_xmote_th - promote/demote a glock
 * @gl: the glock
 */

static void inode_go_xmote_th(struct gfs2_glock *gl)
{
	if (gl->gl_state != LM_ST_UNLOCKED)
		gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_xmote_bh - After promoting/demoting a glock
 * @gl: the glock
 */

static void inode_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh = gl->gl_req_gh;
	struct buffer_head *bh;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    (!gh || !(gh->gh_flags & GL_SKIP))) {
		error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
		if (!error)
			brelse(bh);
	}
}

/**
 * inode_go_drop_th - unlock a glock
 * @gl: the glock
 *
 * Invoked from rq_demote().
 * Another node needs the lock in EXCLUSIVE mode, or the lock (unused for
 * too long) is being purged from our node's glock cache; we're dropping
 * the lock.
 */

static void inode_go_drop_th(struct gfs2_glock *gl)
{
	gfs2_pte_inval(gl);
	if (gl->gl_state == LM_ST_EXCLUSIVE)
		inode_go_sync(gl);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;
	int meta = (flags & DIO_METADATA);

	if (meta) {
		gfs2_meta_inval(gl);
		if (ip)
			set_bit(GIF_INVALID, &ip->i_flags);
	}

	if (ip && S_ISREG(ip->i_inode.i_mode)) {
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
		clear_bit(GIF_PAGED, &ip->i_flags);
	}
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int demote = 0;

	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
		demote = 1;
	else if (!sdp->sd_args.ar_localcaching &&
		 time_after_eq(jiffies, gl->gl_stamp +
			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
		demote = 1;

	return demote;
}
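
/*
 * Policy note: an inode glock is demoted immediately once nothing
 * references it and its page cache is empty; otherwise, unless the
 * "localcaching" mount argument promises no other node shares the lock
 * space, it is demoted after gt_demote_secs of inactivity so idle
 * locks do not accumulate on this node.
 */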

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip)
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_unlock - operation done before an inode lock is unlocked by a
 *		     process
 * @gh: the holder
 */

static void inode_go_unlock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;

	if (ip)
		gfs2_meta_cache_flush(ip);
}

/**
 * rgrp_go_demote_ok - Check to see if it's ok to unlock an RG's glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int rgrp_go_demote_ok(struct gfs2_glock *gl)
{
	return !gl->gl_aspace->i_mapping->nrpages;
}

/**
 * rgrp_go_lock - operation done after an rgrp lock is locked by
 *    a first holder on this node.
 * @gh: the holder
 *
 * Returns: errno
 */

static int rgrp_go_lock(struct gfs2_holder *gh)
{
	return gfs2_rgrp_bh_get(gh->gh_gl->gl_object);
}

/**
 * rgrp_go_unlock - operation done before an rgrp lock is unlocked by
 *    a last holder on this node.
 * @gh: the holder
 */

static void rgrp_go_unlock(struct gfs2_holder *gh)
{
	gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
}

/**
 * trans_go_xmote_th - promote/demote the transaction glock
 * @gl: the glock
 */

static void trans_go_xmote_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 * @gl: the glock
 */

static void trans_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_cache_flush(GFS2_I(sdp->sd_jdesc->jd_inode));
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize the head of the log */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
}
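
/*
 * If the journal head found above lacks GFS2_LOG_HEAD_UNMOUNT, the
 * previous holder of the transaction glock did not shut the log down
 * cleanly (see trans_go_xmote_th/trans_go_drop_th), so the filesystem
 * is flagged inconsistent rather than reinitialized from a bad head.
 */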

/**
 * trans_go_drop_th - unlock the transaction glock
 * @gl: the glock
 *
 * We want to sync the device even with localcaching.  Remember
 * that localcaching journal replay only marks buffers dirty.
 */

static void trans_go_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}

/**
 * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int quota_go_demote_ok(struct gfs2_glock *gl)
{
	return !atomic_read(&gl->gl_lvb_count);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_xmote_th = inode_go_xmote_th,
	.go_xmote_bh = inode_go_xmote_bh,
	.go_drop_th = inode_go_drop_th,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_unlock = inode_go_unlock,
	.go_type = LM_TYPE_INODE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_xmote_th = meta_go_sync,
	.go_drop_th = meta_go_sync,
	.go_inval = meta_go_inval,
	.go_demote_ok = rgrp_go_demote_ok,
	.go_lock = rgrp_go_lock,
	.go_unlock = rgrp_go_unlock,
	.go_type = LM_TYPE_RGRP,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_xmote_th = trans_go_xmote_th,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_drop_th = trans_go_drop_th,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_demote_ok = quota_go_demote_ok,
	.go_type = LM_TYPE_QUOTA,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};
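
/*
 * These tables are not invoked directly: each glock is created with a
 * pointer to the table for its type, and the glock core calls the go_*
 * hooks from its state machine.  A rough usage sketch (simplified; see
 * glock.h for the real interface):
 *
 *	struct gfs2_glock *gl;
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, &gl);
 *	if (!error) {
 *		error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *		if (!error) {
 *			... use the protected object ...
 *			gfs2_glock_dq_uninit(&gh);
 *		}
 *		gfs2_glock_put(gl);
 *	}
 */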