Use a spinlock to protect the vdma data structures.
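As a rough illustration of the pattern that commit subject describes (hypothetical names, not the actual MIPS jazzdma code): a shared vdma translation table is guarded by a single spinlock, and every writer takes the lock with local interrupts disabled so neither another CPU nor an interrupt handler can observe a half-updated entry.

/* Illustrative sketch only; vdma_lock and vdma_pgtbl are made-up names. */
#include <linux/spinlock.h>

static spinlock_t vdma_lock = SPIN_LOCK_UNLOCKED;
static unsigned long vdma_pgtbl[4096];

static void vdma_set_entry(int idx, unsigned long frame)
{
	unsigned long flags;

	/* Take the lock with interrupts off so that neither another CPU
	 * nor an interrupt handler can touch the table mid-update. */
	spin_lock_irqsave(&vdma_lock, flags);
	vdma_pgtbl[idx] = frame;
	spin_unlock_irqrestore(&vdma_lock, flags);
}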
[linux-2.6/linux-mips.git] / include/asm-i386/smplock.h

/*
 * <asm/smplock.h>
 *
 * i386 SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>

extern spinlock_t kernel_flag;

#ifdef CONFIG_SMP
#define kernel_locked()		spin_is_locked(&kernel_flag)
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked()		preempt_count()
#else
#define kernel_locked()		1
#endif
#endif

/*
 * Release global kernel lock and global interrupt lock
 */
#define release_kernel_lock(task) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_unlock(&kernel_flag); \
} while (0)

/*
 * Re-acquire the kernel lock
 */
#define reacquire_kernel_lock(task) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_lock(&kernel_flag); \
} while (0)

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPU's.
 */
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
	/* lock_depth is -1 when the BKL is not held; only the outermost
	 * caller actually takes the spinlock. */
	if (current->lock_depth == -1)
		spin_lock(&kernel_flag);
	++current->lock_depth;
#else
#if 1
	/* The depth goes from -1 to 0 on the first, non-recursive
	 * acquisition, and only then is the spinlock taken. */
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
#else
	__asm__ __volatile__(
		"incl %1\n\t"
		"jne 9f"
		spin_lock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
#endif
}

static __inline__ void unlock_kernel(void)
{
	if (current->lock_depth < 0)
		BUG();
#if 1
	/* Release the spinlock only when the outermost unlock drops
	 * the depth back below zero. */
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
#else
	__asm__ __volatile__(
		"decl %1\n\t"
		"jns 9f\n\t"
		spin_unlock_string
		"\n9:"
		:"=m" (__dummy_lock(&kernel_flag)),
		 "=m" (current->lock_depth));
#endif
}
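For context, a hedged sketch of how code of this era typically used these helpers (the general pattern, not any particular driver): lock_kernel() is recursive through current->lock_depth, so a function can take the big kernel lock without caring whether its caller already holds it, and unlock_kernel() drops the underlying spinlock only when the outermost level releases it. The release_kernel_lock()/reacquire_kernel_lock() macros above are the hooks the scheduler uses to drop and re-take the lock across a context switch for a task that holds it.

/* Illustrative only: a hypothetical char-device open() serialized by the BKL. */
#include <linux/fs.h>
#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

static int example_open(struct inode *inode, struct file *file)
{
	lock_kernel();		/* nested calls just bump current->lock_depth */

	/* ... touch driver-global state that has no finer-grained lock ... */

	unlock_kernel();	/* spinlock released only when depth drops below 0 */
	return 0;
}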