/*
 * Imported from Linux 2.1.116pre2 (davej-history tree):
 * include/asm-i386/smplock.h
 */
/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
6 #include <linux/interrupt.h>
7 #include <asm/spinlock.h>
9 extern spinlock_t kernel_flag;
11 #ifdef __SMP__
12 extern void __check_locks(unsigned int);
13 #else
14 #define __check_locks(x) do { } while (0)
15 #endif
/*
 * Release global kernel lock and global interrupt lock
 */
/*
 * release_kernel_lock(task, cpu) - scheduler helper.
 *
 * If the outgoing task holds the big kernel lock (lock_depth >= 0,
 * possibly nested), drop the spinlock itself but leave
 * task->lock_depth untouched so reacquire_kernel_lock() can re-take
 * it when the task is scheduled again.  Also releases this CPU's
 * global irq lock and re-enables local interrupts.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (task->lock_depth >= 0) \
		__asm__ __volatile__(spin_unlock_string \
			:"=m" (__dummy_lock(&kernel_flag))); \
	release_irqlock(cpu); \
	__sti(); \
} while (0)
/*
 * Re-acquire the kernel lock
 */
/*
 * reacquire_kernel_lock(task) - scheduler helper.
 *
 * Re-take the big kernel lock if the incoming task held it when it
 * was switched away (lock_depth >= 0).  Pairs with
 * release_kernel_lock(); the preserved lock_depth keeps the nesting
 * count intact across the reschedule.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (task->lock_depth >= 0) \
		__asm__ __volatile__(spin_lock_string \
			:"=m" (__dummy_lock(&kernel_flag))); \
} while (0)
/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
47 extern __inline__ void lock_kernel(void)
49 __check_locks(1);
50 __asm__ __volatile__(
51 "incl %1\n\t"
52 "jne 9f"
53 spin_lock_string
54 "\n9:"
55 :"=m" (__dummy_lock(&kernel_flag)),
56 "=m" (current->lock_depth));
59 extern __inline__ void unlock_kernel(void)
61 __asm__ __volatile__(
62 "decl %1\n\t"
63 "jns 9f\n"
64 spin_unlock_string
65 "\n9:"
66 :"=m" (__dummy_lock(&kernel_flag)),
67 "=m" (current->lock_depth));