/*
 * Default SMP lock implementation
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#include <asm/current.h>
#include <asm/hardirq.h>
13 extern spinlock_t kernel_flag
;
16 # define kernel_locked() spin_is_locked(&kernel_flag)
17 # define check_irq_holder(cpu) \
19 if (global_irq_holder == (cpu)) \
23 # define kernel_locked() (1)
/*
 * Release global kernel lock and global interrupt lock
 *
 * Drops the BKL spinlock if @task currently holds it (lock_depth >= 0;
 * the depth itself is preserved so reacquire_kernel_lock() can restore
 * the lock on the way back), then verifies this CPU does not still own
 * the global irq lock.
 */
#define release_kernel_lock(task, cpu) \
do { \
	if (unlikely(task->lock_depth >= 0)) { \
		spin_unlock(&kernel_flag); \
		check_irq_holder(cpu); \
	} \
} while (0)
/*
 * Re-acquire the kernel lock
 *
 * Counterpart of release_kernel_lock(): if @task logically holds the
 * BKL (lock_depth >= 0), take the spinlock again after a schedule.
 */
#define reacquire_kernel_lock(task) \
do { \
	if (unlikely(task->lock_depth >= 0)) \
		spin_lock(&kernel_flag); \
} while (0)
47 * Getting the big kernel lock.
49 * This cannot happen asynchronously,
50 * so we only need to worry about other
53 static __inline__
void
56 if (!++current
->lock_depth
)
57 spin_lock(&kernel_flag
);
60 static __inline__
void
63 if (--current
->lock_depth
< 0)
64 spin_unlock(&kernel_flag
);