/*
 * Default SMP lock implementation
 */
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
10 extern spinlock_t kernel_flag
;
13 #define kernel_locked() preempt_get_count()
15 #define kernel_locked() spin_is_locked(&kernel_flag)
/*
 * Release global kernel lock and global interrupt lock
 *
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement inside unbraced if/else bodies. lock_depth >= 0 means
 * this task currently holds the BKL.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_unlock(&kernel_flag); \
} while (0)
/*
 * Re-acquire the kernel lock
 *
 * Counterpart to release_kernel_lock(): re-take the BKL spinlock if
 * the task's lock_depth says it logically holds the lock. do/while(0)
 * keeps the multi-statement macro safe as a single statement.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_lock(&kernel_flag); \
} while (0)
38 * Getting the big kernel lock.
40 * This cannot happen asynchronously,
41 * so we only need to worry about other
44 static inline void lock_kernel(void)
47 if (current
->lock_depth
== -1)
48 spin_lock(&kernel_flag
);
49 ++current
->lock_depth
;
51 if (!++current
->lock_depth
)
52 spin_lock(&kernel_flag
);
56 static inline void unlock_kernel(void)
58 if (--current
->lock_depth
< 0)
59 spin_unlock(&kernel_flag
);