/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"
/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
	if (rcu_dynticks_nesting) {
		RCU_TRACE(trace_rcu_dyntick("--=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
}
/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting = 0;
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}
/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting--;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	rcu_idle_enter_common(oldval);
	local_irq_restore(flags);
}
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick("++=",
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		struct task_struct *idle = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(oldval != 0);
	rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
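
/*
 * Usage sketch (illustrative, not part of this file): the idle loop and
 * the irq entry/exit paths bracket idle and interrupts with the four
 * hooks above, keeping rcu_dynticks_nesting balanced.  The cpu_idle()
 * shape below is a hypothetical simplification of the per-architecture
 * idle loop:
 *
 *	cpu_idle()                        -- arch idle loop
 *		rcu_idle_enter();         -- nesting -> 0: extended QS
 *		... wait for interrupt ...
 *		rcu_idle_exit();          -- nesting -> DYNTICK_TASK_NESTING
 *
 *	irq_enter()  ->  rcu_irq_enter(); -- nesting++, moving away from idle
 *	irq_exit()   ->  rcu_irq_exit();  -- nesting--, back towards idle
 */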
#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
	return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */
/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 0;
}
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		invoke_rcu_callbacks();
	local_irq_restore(flags);
}
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
	rcu_preempt_check_callbacks();
}
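
/*
 * Usage sketch (illustrative): the scheduling-clock path invokes this
 * hook once per tick, roughly as kernel/timer.c's update_process_times()
 * does (heavily simplified here):
 *
 *	void update_process_times(int user_tick)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		...
 *		rcu_check_callbacks(cpu, user_tick);
 *		...
 *	}
 */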
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      rcu_is_callbacks_kthread()));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
				      is_idle_task(current),
				      rcu_is_callbacks_kthread()));
}
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
	rcu_preempt_process_callbacks();
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
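
/*
 * Usage sketch (illustrative; struct foo, foo_remove() and the list are
 * hypothetical): a typical updater unlinks an element, waits for all
 * pre-existing readers to finish, and only then frees it:
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);    -- unlink from RCU-protected list
 *		synchronize_sched();        -- wait for pre-existing readers
 *		kfree(fp);                  -- now safe to free
 *	}
 */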
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
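
/*
 * Usage sketch (illustrative; struct foo, foo_reclaim() and foo_retire()
 * are hypothetical): a caller embeds a struct rcu_head in its own
 * structure and posts a callback that frees the structure once a grace
 * period has elapsed:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	static void foo_retire(struct foo *fp)
 *	{
 *		call_rcu_sched(&fp->rcu, foo_reclaim);
 *	}
 */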
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
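
/*
 * Usage sketch (illustrative; gp and do_something_with() are
 * hypothetical): call_rcu_bh() pairs with readers running in
 * bottom-half context, which bracket their accesses like this:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		do_something_with(p->data);
 *	rcu_read_unlock_bh();
 */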