/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
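/*
 * gfs2_ail_error - report an unexpected buffer on the AIL and withdraw
 *
 * Called when __gfs2_ail_flush() finds a buffer that is still dirty,
 * locked or pinned. It logs the buffer and glock details and then
 * withdraws the filesystem, since the AIL should only hold clean,
 * unpinned buffers at this point.
 */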
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
}
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @fsync: set when called from fsync (not all buffers will be clean)
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
}
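/*
 * gfs2_ail_empty_gl - issue revokes for everything left on the glock's AIL
 *
 * Builds a small transaction by hand (see the inline gfs2_trans_begin()
 * note below) sized to cover one revoke per buffer currently counted in
 * gl->gl_ail_count, pushes those revokes through __gfs2_ail_flush(), and
 * then flushes the log.
 */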
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin() */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = (unsigned long)__builtin_return_address(0);
	sb_start_intwrite(sdp->sd_vfs);
	gfs2_log_reserve(sdp, tr.tr_reserved);
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
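/*
 * gfs2_ail_flush - flush the AIL for a glock on behalf of fsync or inval
 *
 * The revoke budget starts at the number of u64 block pointers that fit
 * in one log descriptor block and grows in whole continuation blocks
 * (each holding (block size - meta header) / sizeof(u64) revokes) until
 * it covers every buffer on the AIL, so the transaction reserves enough
 * log space for all the revokes it may write.
 */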
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
}
/**
 * rgrp_go_sync - sync out the metadata for this glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	struct gfs2_rgrpd *rgd;
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_spin);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_spin);
}
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 */
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages(mapping, 0);

	if (gl->gl_object) {
		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	}
}
/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 */
static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (ip && !S_ISREG(ip->i_inode.i_mode))
		ip = NULL;
	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_sbd, gl);
	filemap_fdatawrite(metamapping);
	if (ip) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_clear_bit();
	clear_bit(GLF_DIRTY, &gl->gl_flags);
}
/**
 * inode_go_inval - prepare an inode glock to be released
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gl->gl_object;

	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_sbd, NULL);
		gl->gl_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);
}
/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 *
 * Returns: 1 if it's ok
 */
static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *gh;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (gh->gh_list.next != &gl->gl_holders)
			return 0;
	}

	return 1;
}
/**
 * gfs2_set_nlink - Set the inode's link count based on on-disk info
 * @inode: The inode in question
 * @nlink: The link count
 *
 * If the link count has hit zero, it must never be raised, whatever the
 * on-disk inode might say. When new struct inodes are created the link
 * count is set to 1, so that we can safely use this test even when reading
 * in on disk information for the first time.
 */
static void gfs2_set_nlink(struct inode *inode, u32 nlink)
{
	/*
	 * We will need to review setting the nlink count here in the
	 * light of the forthcoming ro bind mount work. This is a reminder
	 * to do that.
	 */
	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
		if (nlink == 0)
			clear_nlink(inode);
		else
			set_nlink(inode, nlink);
	}
}
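/*
 * gfs2_dinode_in - copy an on-disk dinode into the in-core inode
 *
 * All multi-byte fields are converted from big-endian on-disk order.
 * The on-disk atime only wins if it is newer than the in-core atime.
 * Returns 0 on success or -EIO if the dinode fails basic sanity checks
 * (wrong block number, metadata height or directory depth out of range),
 * in which case the inode is marked inconsistent.
 */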
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}
/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}
/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 */
static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}
/**
 * inode_go_dump - print information about an inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */
static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
	const struct gfs2_inode *ip = gl->gl_object;

	if (ip == NULL)
		return 0;
	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(&ip->i_inode));
	return 0;
}
/**
 * trans_go_sync - promote/demote the transaction glock
 * @state: the requested state
 */
static void trans_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_state != LM_ST_UNLOCKED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		gfs2_meta_syncfs(sdp);
		gfs2_log_shutdown(sdp);
	}
}
/**
 * trans_go_xmote_bh - After promoting/demoting the transaction glock
 */
static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/* Initialize some head of the log stuff */
		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}
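/*
 * trans_go_demote_ok - the transaction glock is never a demote candidate,
 * so always report that it is not ok to demote it.
 */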
static int trans_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 *
 * gl_spin lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}
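/*
 * The glock operation tables below associate each lock type with its
 * sync/invalidate/demote/lock/dump callbacks; gfs2_glops_list[] maps the
 * LM_TYPE_* numbers to these tables so the glock core can look them up.
 */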
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_ASPACE | GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_trans_glops = {
	.go_sync = trans_go_sync,
	.go_xmote_bh = trans_go_xmote_bh,
	.go_demote_ok = trans_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};