Revert last change. Bug noticed by Linus.
[linux-2.6/linux-mips.git] / include/asm-mips64/smplock.h
blob f0b627e6a92e22e7af3d335898abab0f7f440372
/*
 * <asm/smplock.h>
 *
 * Default SMP lock implementation
 */
#ifndef _ASM_SMPLOCK_H
#define _ASM_SMPLOCK_H

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

#define kernel_locked()		spin_is_locked(&kernel_flag)

/*
 * Release global kernel lock and global interrupt lock
 */
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
	if (task->lock_depth >= 0)
		spin_unlock(&kernel_flag);
	release_irqlock(cpu);
	__sti();
}

/*
 * Re-acquire the kernel lock
 */
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
	if (task->lock_depth >= 0)
		spin_lock(&kernel_flag);
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously,
 * so we only need to worry about other
 * CPUs.
 */
static __inline__ void lock_kernel(void)
{
	/* lock_depth starts at -1; only the outermost call takes the spinlock */
	if (!++current->lock_depth)
		spin_lock(&kernel_flag);
}

static __inline__ void unlock_kernel(void)
{
	/* only the outermost unlock_kernel() actually drops the spinlock */
	if (--current->lock_depth < 0)
		spin_unlock(&kernel_flag);
}

#endif /* _ASM_SMPLOCK_H */
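
For reference, a minimal sketch of how the per-task depth counting behaves, assuming a task whose lock_depth was initialised to -1 and a hypothetical caller named example_nested_bkl(); this is illustrative only and not part of the header above:

/*
 * Illustrative sketch (not part of smplock.h): nested
 * lock_kernel()/unlock_kernel() calls recurse via current->lock_depth,
 * so only the outermost pair touches the kernel_flag spinlock.
 */
void example_nested_bkl(void)
{
	lock_kernel();		/* lock_depth: -1 -> 0, spin_lock(&kernel_flag) taken */
	lock_kernel();		/* lock_depth:  0 -> 1, no second spin_lock()          */

	/* ... code that requires the big kernel lock ... */

	unlock_kernel();	/* lock_depth:  1 -> 0, lock still held                */
	unlock_kernel();	/* lock_depth:  0 -> -1, spin_unlock(&kernel_flag) here */
}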