/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
static struct task_struct *rcu_kthread_task;
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
static unsigned long have_rcu_kthread_work;

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_kthread(void);
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"
#ifdef CONFIG_NO_HZ

static long rcu_dynticks_nesting = 1;

/*
 * Enter dynticks-idle mode, which is an extended quiescent state
 * if we have fully entered that mode (i.e., if the new value of
 * dynticks_nesting is zero).
 */
void rcu_enter_nohz(void)
{
        if (--rcu_dynticks_nesting == 0)
                rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}

/*
 * Exit dynticks-idle mode, so that we are no longer in an extended
 * quiescent state.
 */
void rcu_exit_nohz(void)
{
        rcu_dynticks_nesting++;
}

#endif /* #ifdef CONFIG_NO_HZ */
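
/*
 * Illustrative sketch, not part of the original file: how an idle loop
 * might bracket its idle period with the dynticks hooks above.  The
 * example_idle() and cpu_do_idle() names are hypothetical.
 */
#if 0 /* example only */
static void example_idle(void)
{
        rcu_enter_nohz();       /* nesting drops to 0: extended quiescent state */
        cpu_do_idle();          /* CPU sleeps; RCU need not disturb it */
        rcu_exit_nohz();        /* nesting back to 1: RCU watches this CPU again */
}
#endif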
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}
/*
 * Wake up rcu_kthread() to process callbacks now eligible for invocation
 * or to boost readers.
 */
static void invoke_rcu_kthread(void)
{
        have_rcu_kthread_work = 1;
        wake_up(&rcu_kthread_wq);
}
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_kthread();
        local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_kthread();
        local_irq_restore(flags);
}
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) &&
             !in_softirq() &&
             hardirq_count() <= (1 << HARDIRQ_SHIFT)))
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}
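
/*
 * Illustrative sketch, not part of the original file: the scheduling-clock
 * tick is what invokes the check above.  In mainline this happens from
 * update_process_times(); the handler name below is hypothetical.
 */
#if 0 /* example only */
static void example_tick(struct pt_regs *regs)
{
        /* "user" is nonzero when the tick interrupted user-mode execution. */
        rcu_check_callbacks(smp_processor_id(), user_mode(regs));
}
#endif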
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail)
                return;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
}
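
/*
 * Illustrative sketch, not part of the original file: the tail-pointer
 * splice above in miniature.  The list has one head pointer and two
 * pointers-to-pointers: donetail marks the end of the callbacks whose
 * grace period has elapsed, curtail the end of the whole list.  The
 * mini_ctrlblk type and mini_extract_done() name are hypothetical.
 */
#if 0 /* example only */
struct mini_ctrlblk {
        struct rcu_head *list;          /* head of the callback list */
        struct rcu_head **donetail;     /* ->next of last "done" callback */
        struct rcu_head **curtail;      /* ->next of last callback overall */
};

static struct rcu_head *mini_extract_done(struct mini_ctrlblk *c)
{
        struct rcu_head *done = c->list; /* done callbacks start at the head */

        c->list = *c->donetail;         /* not-yet-done remainder is new list */
        *c->donetail = NULL;            /* NULL-terminate the done sublist */
        if (c->curtail == c->donetail)  /* was the remainder empty? */
                c->curtail = &c->list;  /* then reset the enqueue tail too */
        c->donetail = &c->list;         /* nothing is "done" any longer */
        return done;
}
#endif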
/*
 * This kthread invokes RCU callbacks whose grace periods have
 * elapsed.  It is awakened as needed, and takes the place of the
 * RCU_SOFTIRQ that was used previously for this purpose.
 * This is a kthread, but it is never stopped, at least not until
 * the system goes down.
 */
static int rcu_kthread(void *arg)
{
        unsigned long work;
        unsigned long morework;
        unsigned long flags;

        for (;;) {
                wait_event_interruptible(rcu_kthread_wq,
                                         have_rcu_kthread_work != 0);
                morework = rcu_boost();
                local_irq_save(flags);
                work = have_rcu_kthread_work;
                have_rcu_kthread_work = morework;
                local_irq_restore(flags);
                if (work) {
                        rcu_process_callbacks(&rcu_sched_ctrlblk);
                        rcu_process_callbacks(&rcu_bh_ctrlblk);
                        rcu_preempt_process_callbacks();
                }
                schedule_timeout_interruptible(1); /* Leave CPU for others. */
        }

        return 0;  /* Not reached, but needed to shut gcc up. */
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
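
/*
 * Illustrative sketch, not part of the original file: the classic updater
 * pattern that relies on synchronize_sched().  The struct foo type and
 * global_foo pointer are hypothetical, and kfree() assumes <linux/slab.h>.
 */
#if 0 /* example only */
struct foo {
        int a;
};
static struct foo *global_foo;

static void example_update(struct foo *new_fp)
{
        struct foo *old_fp = global_foo;

        rcu_assign_pointer(global_foo, new_fp); /* publish the new version */
        synchronize_sched();    /* wait for all pre-existing readers */
        kfree(old_fp);          /* no reader can still hold the old one */
}
#endif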
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}
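
/*
 * Illustrative sketch, not part of the original file: the tail-pointer
 * enqueue above in miniature, using the hypothetical mini_ctrlblk from
 * the earlier dequeue sketch.
 */
#if 0 /* example only */
static void mini_enqueue(struct mini_ctrlblk *c, struct rcu_head *head)
{
        head->next = NULL;
        *c->curtail = head;       /* link at the current end of the list */
        c->curtail = &head->next; /* tail now points at the new last ->next */
}
#endif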
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
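
/*
 * Illustrative sketch, not part of the original file: a typical
 * call_rcu_sched() caller embeds an rcu_head in its structure and frees
 * via container_of().  The struct foo2 and helper names are hypothetical,
 * and kfree() assumes <linux/slab.h>.
 */
#if 0 /* example only */
struct foo2 {
        int data;
        struct rcu_head rcu;
};

static void foo2_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct foo2, rcu));
}

static void foo2_retire(struct foo2 *fp)
{
        /* Defer the free until a grace period has elapsed. */
        call_rcu_sched(&fp->rcu, foo2_free_rcu);
}
#endif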
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
void rcu_barrier_bh(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
void rcu_barrier_sched(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_sched(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
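
/*
 * Illustrative sketch, not part of the original file: rcu_barrier_sched()
 * is typically used on module unload, to make sure every callback this
 * module queued has finished running before the callback code itself is
 * unmapped.  The example_exit() name is hypothetical.
 */
#if 0 /* example only */
static void __exit example_exit(void)
{
        /* First stop queueing new callbacks (module-specific). */
        rcu_barrier_sched();    /* then wait for the queued ones to run */
        /* Now the module's callback functions can safely disappear. */
}
#endif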
/*
 * Spawn the kthread that invokes RCU callbacks.
 */
static int __init rcu_spawn_kthreads(void)
{
        struct sched_param sp;

        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
        sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
        return 0;
}
early_initcall(rcu_spawn_kthreads);