kernel/locking/lglock.c

/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (but it's fine to use them from there)
 * Could be added though, just undo lg_lock_init
 */

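/*
 * Rough usage sketch (the lock name is made up; see include/linux/lglock.h
 * for the DEFINE_LGLOCK/DEFINE_STATIC_LGLOCK helpers and the full API):
 *
 *        DEFINE_STATIC_LGLOCK(my_lglock);
 *
 *        Fast path, touching only this CPU's data:
 *                lg_local_lock(&my_lglock);
 *                ...
 *                lg_local_unlock(&my_lglock);
 *
 *        Slow path, needing a consistent view across all CPUs:
 *                lg_global_lock(&my_lglock);
 *                ...
 *                lg_global_unlock(&my_lglock);
 */
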
void lg_lock_init(struct lglock *lg, char *name)
{
        LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);

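/*
 * lg_local_lock/lg_local_unlock: take only the calling CPU's spinlock.
 * Preemption stays disabled for the whole critical section so the holder
 * cannot migrate away from the lock it owns.
 */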
void lg_local_lock(struct lglock *lg)
{
        arch_spinlock_t *lock;

        preempt_disable();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
        arch_spinlock_t *lock;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        arch_spin_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock);

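/*
 * lg_local_lock_cpu/lg_local_unlock_cpu: like the local variants, but
 * operate on the spinlock of an explicitly given CPU instead of the
 * current one.
 */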
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
        arch_spinlock_t *lock;

        preempt_disable();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
        arch_spinlock_t *lock;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        arch_spin_unlock(lock);
        preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);

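/*
 * lg_double_lock/lg_double_unlock: take the spinlocks of exactly two CPUs.
 * The locks are acquired in ascending CPU order, which matches the order
 * used by lg_global_lock, so the two paths cannot deadlock against each
 * other.
 */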
void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
{
        BUG_ON(cpu1 == cpu2);

        /* lock in cpu order, just like lg_global_lock */
        if (cpu2 < cpu1)
                swap(cpu1, cpu2);

        preempt_disable();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
        arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
}

void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
        arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
        preempt_enable();
}

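/*
 * lg_global_lock/lg_global_unlock: take every possible CPU's spinlock,
 * excluding all local and per-CPU lockers. This is the expensive slow
 * path and is annotated as an exclusive acquisition for lockdep.
 */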
void lg_global_lock(struct lglock *lg)
{
        int i;

        preempt_disable();
        lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        for_each_possible_cpu(i) {
                arch_spinlock_t *lock;
                lock = per_cpu_ptr(lg->lock, i);
                arch_spin_lock(lock);
        }
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
        int i;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        for_each_possible_cpu(i) {
                arch_spinlock_t *lock;
                lock = per_cpu_ptr(lg->lock, i);
                arch_spin_unlock(lock);
        }
        preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);