Goodbye mips64. 31704 lines of code bite the dust.
[linux-2.6/linux-mips.git] / include/asm-ia64/smplock.h
/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/current.h>
#include <asm/hardirq.h>
extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
# define kernel_locked()	spin_is_locked(&kernel_flag)
# define check_irq_holder(cpu) \
do { \
	if (global_irq_holder == (cpu)) \
		BUG(); \
} while (0)
#else
# define kernel_locked()	(1)
/* release_kernel_lock() below expands check_irq_holder() unconditionally,
 * so the UP build needs a no-op definition as well. */
# define check_irq_holder(cpu)	do { } while (0)
#endif
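/*
 * Illustrative sketch, not part of the original header: kernel_locked()
 * was typically used to assert that a code path runs under the big
 * kernel lock (BKL).  The helper name below is hypothetical.
 */
static __inline__ void
assert_bkl_held_sketch(void)
{
	if (!kernel_locked())	/* constant 1 on UP, real check on SMP */
		BUG();
}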
/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (unlikely(task->lock_depth >= 0)) { \
		spin_unlock(&kernel_flag); \
		check_irq_holder(cpu); \
	} \
} while (0)
/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_lock(&kernel_flag); \
} while (0)
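/*
 * Illustrative sketch, not part of the original header: this is roughly
 * the shape in which scheduler code of this era dropped the BKL across
 * a context switch.  The function name is hypothetical and the body is
 * abbreviated.
 */
static __inline__ void
bkl_context_switch_sketch(struct task_struct *prev)
{
	int cpu = smp_processor_id();

	release_kernel_lock(prev, cpu);	/* drop kernel_flag; prev->lock_depth is preserved */

	/* ... pick the next task and switch to it ... */

	reacquire_kernel_lock(current);	/* retake kernel_flag iff lock_depth >= 0 */
}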
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
static __inline__ void
lock_kernel(void)
{
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

static __inline__ void
unlock_kernel(void)
{
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}
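/*
 * Illustrative sketch, not part of the original header: the BKL is
 * recursive per task.  lock_depth starts at -1, only the -1 -> 0
 * transition takes kernel_flag, and only the 0 -> -1 transition drops
 * it, so nested lock/unlock pairs are harmless.  The helper name is
 * hypothetical.
 */
static __inline__ void
bkl_nesting_sketch(void)
{
	lock_kernel();		/* depth -1 -> 0: spin_lock(&kernel_flag) */
	lock_kernel();		/* depth  0 -> 1: lock already held, nothing to do */
	unlock_kernel();	/* depth  1 -> 0: still held */
	unlock_kernel();	/* depth  0 -> -1: spin_unlock(&kernel_flag) */
}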