Import 2.3.18pre1
[davej-history.git] / include / asm-i386 / smplock.h
blob152c1a9fa62ef640918f6aca904e88d09986429a
1 /*
2 * <asm/smplock.h>
4 * i386 SMP lock implementation
5 */
6 #include <linux/interrupt.h>
7 #include <linux/spinlock.h>
9 extern spinlock_t kernel_flag;
/*
 * Release global kernel lock and global interrupt lock
 */
/*
 * Drop the big kernel lock for @task on @cpu, release the global
 * interrupt lock, and re-enable local interrupts.
 *
 * kernel_flag is only unlocked when the task actually holds it
 * (lock_depth >= 0); lock_depth itself is left untouched so that
 * reacquire_kernel_lock() can later restore the lock to the same
 * recursion depth.
 *
 * NOTE(review): macro arguments are now parenthesized at expansion
 * ((task)->lock_depth) so that passing a non-trivial expression as
 * 'task' cannot mis-bind.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if ((task)->lock_depth >= 0) \
		spin_unlock(&kernel_flag); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)
/*
 * Re-acquire the kernel lock
 */
/*
 * Re-take the big kernel lock for @task if it was holding the lock
 * (lock_depth >= 0) when release_kernel_lock() dropped it.
 *
 * NOTE(review): the macro argument is now parenthesized at expansion
 * ((task)->lock_depth) so that passing a non-trivial expression as
 * 'task' cannot mis-bind.
 */
#define reacquire_kernel_lock(task) \
do { \
	if ((task)->lock_depth >= 0) \
		spin_lock(&kernel_flag); \
} while (0)
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
39 extern __inline__ void lock_kernel(void)
41 __asm__ __volatile__(
42 "incl %1\n\t"
43 "jne 9f"
44 spin_lock_string
45 "\n9:"
46 :"=m" (__dummy_lock(&kernel_flag)),
47 "=m" (current->lock_depth));
50 extern __inline__ void unlock_kernel(void)
52 if (current->lock_depth < 0)
53 BUG();
54 __asm__ __volatile__(
55 "decl %1\n\t"
56 "jns 9f\n\t"
57 spin_unlock_string
58 "\n9:"
59 :"=m" (__dummy_lock(&kernel_flag)),
60 "=m" (current->lock_depth));