/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
DECLARE_MUTEX(kernel_sem);
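
/*
 * Illustrative sketch: what the recursive take/release described above
 * looks like from the caller's side. Only the outermost
 * lock_kernel()/unlock_kernel() pair actually touches kernel_sem;
 * nested calls just adjust lock_depth. The legacy_outer() and
 * legacy_inner() names are hypothetical.
 */
#if 0
static void legacy_inner(void)
{
	lock_kernel();		/* lock_depth 0 -> 1, no down() */
	/* ... touch BKL-protected state ... */
	unlock_kernel();	/* lock_depth 1 -> 0, no up() */
}

static void legacy_outer(void)
{
	lock_kernel();		/* lock_depth -1 -> 0, down(&kernel_sem) */
	legacy_inner();
	unlock_kernel();	/* lock_depth 0 -> -1, up(&kernel_sem) */
}
#endif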
/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 *
 * Called with interrupts disabled.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	struct task_struct *task = current;
	int saved_lock_depth = task->lock_depth;

	local_irq_enable();
	BUG_ON(saved_lock_depth < 0);

	/* Hide the lock depth so a schedule() inside down() cannot recurse. */
	task->lock_depth = -1;

	down(&kernel_sem);

	task->lock_depth = saved_lock_depth;

	local_irq_disable();

	return 0;
}
void __lockfunc __release_kernel_lock(void)
{
	up(&kernel_sem);
}
/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
	struct task_struct *task = current;
	int depth = task->lock_depth + 1;

	if (likely(!depth)) {
		/*
		 * No recursion worries - we set up lock_depth _after_
		 */
		down(&kernel_sem);
#ifdef CONFIG_DEBUG_RT_MUTEXES
		current->last_kernel_lock = __builtin_return_address(0);
#endif
	}

	task->lock_depth = depth;
}
void __lockfunc unlock_kernel(void)
{
	struct task_struct *task = current;

	BUG_ON(task->lock_depth < 0);

	if (likely(--task->lock_depth == -1)) {
#ifdef CONFIG_DEBUG_RT_MUTEXES
		current->last_kernel_lock = NULL;
#endif
		up(&kernel_sem);
	}
}
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
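
/*
 * Illustrative sketch: roughly how the scheduler core uses
 * __release_kernel_lock() and __reacquire_kernel_lock() to drop the
 * BKL transparently across schedule(). This is simplified; the real
 * call sites live in the scheduler behind the release_kernel_lock()
 * and reacquire_kernel_lock() helpers from <linux/smp_lock.h>.
 */
#if 0
	/* On entry to schedule(), with preemption disabled: */
	if (prev->lock_depth >= 0)
		__release_kernel_lock();

	/* ... pick the next task and context-switch ... */

	/* Once this task runs again, with interrupts still disabled: */
	if (current->lock_depth >= 0)
		__reacquire_kernel_lock();
#endif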