/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
struct gfs2_gl_hash_bucket {
        struct hlist_head hb_list;
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
static int dump_inode(struct gfs2_inode *ip);
static void gfs2_glock_xmote_th(struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];
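
/* With GFS2_GL_HASH_SHIFT == 15 the table above has 1 << 15 = 32768
   bucket heads. All mounted GFS2 filesystems share this one table,
   which is why gl_hash() below mixes the superblock pointer into the
   hash as well as the lock name. */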

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
        defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}

#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
        return NULL;
}
#endif

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock the lock belongs to
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
                            const struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(u64), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
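
/* Each jhash() call above folds one more field into the running hash
   by passing the previous result in as the initial value, so the lock
   number, the lock type and the superblock pointer all contribute to
   the bucket choice; hashing &sdp keeps glocks of different mounts
   spread across the shared table. */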

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        atomic_inc(&gl->gl_ref);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        int rv = 0;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        write_lock(gl_lock_addr(gl->gl_hash));
        if (atomic_dec_and_test(&gl->gl_ref)) {
                hlist_del(&gl->gl_list);
                write_unlock(gl_lock_addr(gl->gl_hash));
                BUG_ON(spin_is_locked(&gl->gl_spin));
                gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
                gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
                gfs2_assert(sdp, list_empty(&gl->gl_holders));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
                gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(gl_lock_addr(gl->gl_hash));
out:
        return rv;
}
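
/* The final reference is always dropped under the bucket's write
   lock, so a concurrent search_bucket() can never find, and take a
   new reference on, a glock whose count has already reached zero. */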

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the filesystem the lock belongs to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
                                        const struct gfs2_sbd *sdp,
                                        const struct lm_lockname *name)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;

        hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;

                atomic_inc(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
                                          const struct lm_lockname *name)
{
        unsigned int hash = gl_hash(sdp, name);
        struct gfs2_glock *gl;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, name);
        read_unlock(gl_lock_addr(hash));

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                   const struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
        struct gfs2_glock *gl, *tmp;
        unsigned int hash = gl_hash(sdp, &name);
        int error;

        read_lock(gl_lock_addr(hash));
        gl = search_bucket(hash, sdp, &name);
        read_unlock(gl_lock_addr(hash));

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        gl->gl_flags = 0;
        gl->gl_name = name;
        atomic_set(&gl->gl_ref, 1);
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_hash = hash;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        gl->gl_ops = glops;
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        gl->gl_vn = 0;
        gl->gl_stamp = jiffies;
        gl->gl_object = NULL;
        gl->gl_sbd = sdp;
        gl->gl_aspace = NULL;
        lops_init_le(&gl->gl_le, &gfs2_glock_lops);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(gl_lock_addr(hash));
        tmp = search_bucket(hash, sdp, &name);
        if (tmp) {
                write_unlock(gl_lock_addr(hash));
                glock_free(gl);
                gl = tmp;
        } else {
                hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
                write_unlock(gl_lock_addr(hash));
        }

        *glp = gl;

        return 0;

fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);
fail:
        kmem_cache_free(gfs2_glock_cachep, gl);
        return error;
}
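
/* Only the second lookup, done under the bucket's write lock, is
   authoritative: two tasks may race past the read-locked lookup and
   both allocate, in which case the loser frees its copy and adopts
   the glock the winner inserted. */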

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        gfs2_glock_hold(gl);
}
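
/*
 * A typical holder lifecycle, as a sketch only (the glock, state and
 * flags below are illustrative, not part of this file):
 *
 *      struct gfs2_holder gh;
 *      int error;
 *
 *      gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *      error = gfs2_glock_nq(&gh);
 *      if (!error) {
 *              ... use the object the glock protects ...
 *              gfs2_glock_dq(&gh);
 *      }
 *      gfs2_holder_uninit(&gh);
 */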

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags for the holder
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

static void gfs2_holder_dispose_or_wake(struct gfs2_holder *gh)
{
        if (test_bit(HIF_DEALLOC, &gh->gh_iflags)) {
                gfs2_holder_put(gh);
                return;
        }
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int holder_wait(void *word)
{
        schedule();
        return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
        might_sleep();
        wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
}
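
/* HIF_WAIT is the handshake between queuer and servicer: the bit is
   set before a holder is queued, the waiter sleeps on it here, and
   gfs2_holder_dispose_or_wake() clears it (with a barrier) and wakes
   the sleeper once the request has been serviced. */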

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /* gh->gh_error never examined. */
        set_bit(GLF_LOCK, &gl->gl_flags);
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb();
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to a more
 * restrictive one.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        gfs2_glock_xmote_th(gh);
                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_state == LM_ST_EXCLUSIVE)
                        return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        gfs2_holder_dispose_or_wake(gh);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                gfs2_holder_dispose_or_wake(gh);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        gfs2_glock_drop_th(gl);
                else
                        gfs2_glock_xmote_th(gh);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}
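
/* run_queue() below services the three waiter lists in strict
   priority order: internal mutex requests (gl_waiters1) first, then
   remote demote requests (gl_waiters2), then local promote requests
   (gl_waiters3), and it stops as soon as one request blocks. */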

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);
        if (test_and_set_bit(HIF_WAIT, &gh.gh_iflags))
                BUG();

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                clear_bit(HIF_WAIT, &gh.gh_iflags);
                smp_mb();
                wake_up_bit(&gh.gh_iflags, HIF_WAIT);
        }
        spin_unlock(&gl->gl_spin);

        wait_on_holder(&gh);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
                acquired = 0;
        } else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
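
/* GLF_LOCK plus the gl_waiters1 list acts as a sleeping mutex without
   the cost of embedding a struct mutex in every glock: contenders
   queue a dummy holder and sleep on HIF_WAIT until run_queue() hands
   the bit over through rq_mutex(). */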

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * Note: This may fail silently if we are out of memory.
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY, GFP_NOFS);
                if (!new_gh)
                        return;
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);
                set_bit(HIF_WAIT, &new_gh->gh_iflags);

                goto restart;
        }

out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, 0);
        }

        /* Deal with each possible exit condition */

        if (!gh)
                gl->gl_stamp = jiffies;
        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);
        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED) {
                        gh->gh_error = 0;
                } else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED);

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gh: the holder carrying the requested state and modifier flags
 */

void gfs2_glock_xmote_th(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int flags = gh->gh_flags;
        unsigned state = gh->gh_state;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        if (glops->go_xmote_th)
                glops->go_xmote_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}
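
/* gfs2_lm_lock() may complete synchronously, in which case the new
   state comes back in the return value and xmote_bh() runs right
   here, or asynchronously, in which case LM_OUT_ASYNC is returned and
   gl_req_bh is invoked later from gfs2_glock_cb() when the lock
   module answers. */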

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh)
                gfs2_holder_dispose_or_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        if (glops->go_drop_th)
                glops->go_drop_th(gl);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh && !(gl->gl_req_gh &&
                                       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_on_holder(gh);
        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);
        if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
                BUG();

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                printk(KERN_INFO "pid : %d\n", existing->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       existing->gh_gl->gl_name.ln_type, existing->gh_gl->gl_state);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                printk(KERN_INFO "pid : %d\n", gh->gh_owner->pid);
                printk(KERN_INFO "lock type : %d lock state : %d\n",
                       gl->gl_name.ln_type, gl->gl_state);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        return error;
}
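
/*
 * With GL_ASYNC the enqueue returns immediately; a sketch of how a
 * caller might then complete the request (the names and states here
 * are illustrative only):
 *
 *      gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *      gfs2_glock_nq(&gh);
 *      ... do other work ...
 *      if (gfs2_glock_poll(&gh))
 *              error = gfs2_glock_wait(&gh);
 */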

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was queued with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        const struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
                      const struct gfs2_glock_operations *glops,
                      unsigned int state, int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
        const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
        const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

        if (a->ln_number > b->ln_number)
                return 1;
        if (a->ln_number < b->ln_number)
                return -1;
        BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
        return 0;
}
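
/* Acquiring multiple glocks in a single, globally consistent order
   (ascending lock number) is what makes nq_m_sync() below deadlock
   free: two tasks can never each hold a lock the other is waiting
   for. */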

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers, sorted by lock number
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has. Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}
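
/* Note the optimistic strategy above: all locks are first tried in
   the caller's order with LM_FLAG_TRY | GL_ASYNC, and only if a try
   fails without a hard error does the code fall back to the sorted,
   blocking nq_m_sync() path. The e array does double duty: it first
   holds per-lock error codes, then is reused as the pointer scratch
   array handed to nq_m_sync(). */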

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach an LVB to a glock
 * @gl: The glock in question
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach an LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
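
/* An LVB (lock value block) is a small buffer the lock module keeps
   attached to an inter-node lock, letting nodes piggyback a little
   shared data (GFS2 uses it for quota information, for example) on
   lock traffic. The count above lets several users share one LVB. */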

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = cb_data;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                down_read(&gfs2_umount_flush_sem);
                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl)) {
                        /* don't leak the flush sem on a bogus callback */
                        up_read(&gfs2_umount_flush_sem);
                        return;
                }
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                up_read(&gfs2_umount_flush_sem);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        const struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          unsigned int hash)
{
        struct gfs2_glock *gl, *prev = NULL;
        int has_entries = 0;
        struct hlist_head *head = &gl_hash_table[hash].hb_list;

        read_lock(gl_lock_addr(hash));
        /* Can't use hlist_for_each_entry - don't want prefetch here */
        if (hlist_empty(head))
                goto out;
        gl = list_entry(head->first, struct gfs2_glock, gl_list);
        while(1) {
                if (gl->gl_sbd == sdp) {
                        gfs2_glock_hold(gl);
                        read_unlock(gl_lock_addr(hash));
                        if (prev)
                                gfs2_glock_put(prev);
                        prev = gl;
                        examiner(gl);
                        has_entries = 1;
                        read_lock(gl_lock_addr(hash));
                }
                if (gl->gl_list.next == NULL)
                        break;
                gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
        }
out:
        read_unlock(gl_lock_addr(hash));
        if (prev)
                gfs2_glock_put(prev);
        return has_entries;
}
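
/* Holding a reference on the previously examined glock (prev) keeps
   its list linkage valid across the unlock/relock window, so the walk
   can resume from it even though the bucket lock was dropped while
   the examiner ran. */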

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
                return;

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
        return;

out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                examine_bucket(scan_glock, sdp, x);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else {
                spin_unlock(&sdp->sd_reclaim_lock);
        }

        if (gfs2_glmutex_trylock(gl)) {
                if (list_empty(&gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;
                for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                        if (examine_bucket(clear_glock, sdp, x))
                                cont = 1;
                }

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                down_write(&gfs2_umount_flush_sem);
                invalidate_inodes(sdp->sd_vfs);
                up_write(&gfs2_umount_flush_sem);
                msleep(10);
        }
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               (unsigned long long)ip->i_num.no_formal_ino,
               (unsigned long long)ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_inode.i_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");

        error = 0;

        return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock 0x%p (%u, %llu)\n", gl, gl->gl_name.ln_type,
               (unsigned long long)gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++) {
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        }
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        /* gl_owner may be NULL if nobody holds the glmutex right now */
        printk(KERN_INFO "  gl_owner = %s\n",
               (gl->gl_owner) ? gl->gl_owner->comm : "none");
        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

out:
        spin_unlock(&gl->gl_spin);
        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the state of every glock belonging to @sdp to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;
        struct hlist_node *h;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

                read_lock(gl_lock_addr(x));

                hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
                        if (gl->gl_sbd != sdp)
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(gl_lock_addr(x));

                if (error)
                        break;
        }

        return error;
}

int __init gfs2_glock_init(void)
{
        unsigned i;
        for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
        }
#ifdef GL_HASH_LOCK_SZ
        for(i = 0; i < GL_HASH_LOCK_SZ; i++) {
                rwlock_init(&gl_hash_locks[i]);
        }
#endif
        return 0;
}