/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int dump_inode(struct gfs2_inode *ip);
static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif
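/*
 * Illustrative note (not part of the original source): both arrays are
 * indexed from the same gl_hash() value, just masked differently, so many
 * hash chains share one rwlock:
 *
 *      read_lock(gl_lock_addr(hash));
 *      ... walk gl_hash_table[hash].hb_list ...
 *      read_unlock(gl_lock_addr(hash));
 *
 * On !SMP builds gl_lock_addr() returns NULL and the rwlock operations
 * compile out.
 */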
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
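/*
 * Examples (illustrative, not from the original source):
 *
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0)
 *              returns 1: an exclusive lock already covers shared access.
 *      relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT)
 *              returns 0: GL_EXACT demands the exact state.
 *      relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY)
 *              returns 1: LM_FLAG_ANY accepts any locked state.
 */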
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}
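/*
 * Usage sketch (illustrative, not from the original source): references
 * must balance, and the final put unhashes and frees the glock:
 *
 *      gfs2_glock_hold(gl);
 *      ... use gl ...
 *      gfs2_glock_put(gl);     (returns 1 if the last reference was dropped)
 */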
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_hash = hash;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
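/*
 * Usage sketch (illustrative, not from the original source):
 *
 *      struct gfs2_glock *gl;
 *      int error;
 *
 *      error = gfs2_glock_get(sdp, number, &gfs2_inode_glops, CREATE, &gl);
 *      if (error)
 *              return error;
 *      ... queue holders on gl ...
 *      gfs2_glock_put(gl);     (drop the reference gfs2_glock_get took)
 */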
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the memory allocation flags
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}
/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}
static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
{
        if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
                gfs2_holder_put(gh);
                return;
        }
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}
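/*
 * Sketch of the wait/wake protocol (illustrative, not from the original
 * source): the queuing side sets HIF_WAIT before handing the holder to
 * the state machine, then sleeps; the completion side clears the bit and
 * wakes any sleeper:
 *
 *      set_bit(HIF_WAIT, &gh->gh_iflags);      (queuing side)
 *      ... enqueue gh ...
 *      wait_on_holder(gh);
 *
 *      clear_bit(HIF_WAIT, &gh->gh_iflags);    (completion side)
 *      smp_mb();
 *      wake_up_bit(&gh->gh_iflags, HIF_WAIT);
 */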
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        gfs2_glock_xmote_th(gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_dispose_or_wake(gh);

        return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                gfs2_holder_dispose_or_wake(gh);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        gfs2_glock_drop_th(gl);
                else
                        gfs2_glock_xmote_th(gh);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}
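/*
 * Editorial note (not from the original source): run_queue() services the
 * waiter lists in strict priority order: gl_waiters1 (glmutex requests)
 * first, then gl_waiters2 (demote requests, unless GLF_SKIP_WAITERS2 is
 * set), then gl_waiters3 (promote requests), stopping as soon as a
 * request reports the queue blocked.
 */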
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}
/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
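/*
 * Usage sketch (illustrative, not from the original source): the glmutex
 * gives one task exclusive access to the glock structure itself:
 *
 *      gfs2_glmutex_lock(gl);
 *      ... manipulate gl ...
 *      gfs2_glmutex_unlock(gl);
 *
 * or, where blocking is not acceptable:
 *
 *      if (gfs2_glmutex_trylock(gl)) {
 *              ... manipulate gl ...
 *              gfs2_glmutex_unlock(gl);
 *      }
 */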
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
                set_bit(HIF_WAIT, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;
        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);
        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED) {
                        gh->gh_error = 0;
                } else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED);

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gh: the holder whose requested state and flags drive the lock call
 */

static void gfs2_glock_xmote_th(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh->gh_flags;
        unsigned state = gh->gh_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       existing->gh_gl->gl_name.ln_type,
                       existing->gh_gl->gl_state);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       gl->gl_name.ln_type, gl->gl_state);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
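/*
 * Async usage sketch (illustrative, not from the original source):
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);             (never fails with GL_ASYNC)
 *      while (!gfs2_glock_poll(&gh))
 *              ... do other work ...
 *      error = gfs2_glock_wait(&gh);   (collect the real result)
 */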
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}
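/*
 * Typical synchronous lifecycle (illustrative, not from the original
 * source); the gfs2_glock_nq_init()/gfs2_glock_dq_uninit() helpers wrap
 * roughly this sequence:
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (!error) {
 *              ... glock held ...
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 */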
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);
        return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}
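/*
 * Usage sketch (illustrative, not from the original source; gl_a and
 * gl_b stand for two already-located glocks): acquire both in
 * deadlock-free order, then release both:
 *
 *      struct gfs2_holder ghs[2];
 *      int error;
 *
 *      gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *      gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *      error = gfs2_glock_nq_m(2, ghs);
 *      ...
 *      gfs2_glock_dq_uninit_m(2, ghs);
 */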
/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
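/*
 * Usage sketch (illustrative, not from the original source): the lock
 * value block (LVB) lets nodes piggyback a small blob of shared data on
 * a lock:
 *
 *      error = gfs2_lvb_hold(gl);
 *      if (!error) {
 *              ... read or write gl->gl_lvb ...
 *              gfs2_lvb_unhold(gl);
 *      }
 */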
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @sdp: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while (1) {
                if (gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        return has_entries;
}
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(scan_glock, sdp, x);
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               (unsigned long long)ip->i_num.no_formal_ino,
               (unsigned long long)ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");

        error = 0;

        return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
               (unsigned long long)gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        }
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}
int __init gfs2_glock_init(void)
{
        unsigned i;
        for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif
        return 0;
}