perf sched: Implement the scheduling workload replay engine
[linux-2.6/verdex.git] / kernel / rcutree_plugin.h
blob 47789369ea59deb97365b67bcc071e48ae8d5e08
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptable semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO
	       "Experimental preemptable hierarchical RCU implementation.\n");
}
/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 */
static void rcu_preempt_qs_record(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
/*
 * We have entered the scheduler or are between softirqs in ksoftirqd.
 * If we are in an RCU read-side critical section, we need to reflect
 * that in the state of the rcu_node structure corresponding to this CPU.
 * Caller must disable hardirqs.
 */
static void rcu_preempt_qs(int cpu)
{
	struct task_struct *t = current;
	int phase;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = rcu_preempt_state.rda[cpu];
		rnp = rdp->mynode;
		spin_lock(&rnp->lock);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.
		 */
		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
		smp_mb();  /* Ensure later ctxt swtch seen after above. */
		spin_unlock(&rnp->lock);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_qs_record(cpu);
	t->rcu_read_unlock_special &= ~(RCU_READ_UNLOCK_NEED_QS |
					RCU_READ_UNLOCK_GOT_QS);
}
/*
 * Tree-preemptable RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	ACCESS_ONCE(current->rcu_read_lock_nesting)++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_GOT_QS;
	}

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			spin_lock(&rnp->lock);
			if (rnp == t->rcu_blocked_node)
				break;
			spin_unlock(&rnp->lock);
		}
		empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk()
		 * drop rnp->lock and restore irq.
		 */
		if (!empty && rnp->qsmask == 0 &&
		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
			t->rcu_read_unlock_special &=
				~(RCU_READ_UNLOCK_NEED_QS |
				  RCU_READ_UNLOCK_GOT_QS);
			if (rnp->parent == NULL) {
				/* Only one rcu_node in the tree. */
				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
				return;
			}
			/* Report up the rest of the hierarchy. */
			mask = rnp->grpmask;
			spin_unlock_irqrestore(&rnp->lock, flags);
			rnp = rnp->parent;
			spin_lock_irqsave(&rnp->lock, flags);
			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
			return;
		}
		spin_unlock(&rnp->lock);
	}
	local_irq_restore(flags);
}
/*
 * Tree-preemptable RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
	if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
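/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): a typical read-side critical section built on the
 * primitives above.  Under CONFIG_TREE_PREEMPT_RCU, rcu_read_lock() and
 * rcu_read_unlock() map onto __rcu_read_lock() and __rcu_read_unlock(),
 * so the reader below may be preempted inside the critical section and
 * will then be queued on its rcu_node's blocked_tasks[] list.  The names
 * "foo", "global_foo", and "read_foo_a" are hypothetical.
 *
 *	struct foo {
 *		int a;
 *	};
 *	struct foo *global_foo;
 *
 *	int read_foo_a(void)
 *	{
 *		struct foo *p;
 *		int ret = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(global_foo);
 *		if (p != NULL)
 *			ret = p->a;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 */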
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	struct list_head *lp;
	int phase = rnp->gpnum & 0x1;
	struct task_struct *t;

	if (!list_empty(&rnp->blocked_tasks[phase])) {
		spin_lock_irqsave(&rnp->lock, flags);
		phase = rnp->gpnum & 0x1; /* re-read under lock. */
		lp = &rnp->blocked_tasks[phase];
		list_for_each_entry(t, lp, rcu_node_entry)
			printk(" P%d", t->pid);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
/*
 * Check for preempted RCU readers for the specified rcu_node structure.
 * If the caller needs a reliable answer, it must hold the rcu_node's
 * ->lock.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
	int i;
	struct list_head *lp;
	struct list_head *lp_root;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *tp;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return;  /* Shouldn't happen: at least one CPU online. */
	}

	/*
	 * Move tasks up to root rcu_node.  Rely on the fact that the
	 * root rcu_node can be at most one ahead of the rest of the
	 * rcu_nodes in terms of gp_num value.  This fact allows us to
	 * move the blocked_tasks[] array directly, element by element.
	 */
	for (i = 0; i < 2; i++) {
		lp = &rnp->blocked_tasks[i];
		lp_root = &rnp_root->blocked_tasks[i];
		while (!list_empty(lp)) {
			tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
			spin_lock(&rnp_root->lock); /* irqs already disabled */
			list_del(&tp->rcu_node_entry);
			tp->rcu_blocked_node = rnp_root;
			list_add(&tp->rcu_node_entry, lp_root);
			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
		}
	}
}

/*
 * Do CPU-offline processing for preemptable RCU.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
	__rcu_offline_cpu(cpu, &rcu_preempt_state);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		t->rcu_read_unlock_special &=
			~(RCU_READ_UNLOCK_NEED_QS | RCU_READ_UNLOCK_GOT_QS);
		rcu_preempt_qs_record(cpu);
		return;
	}
	if (per_cpu(rcu_preempt_data, cpu).qs_pending) {
		if (t->rcu_read_unlock_special & RCU_READ_UNLOCK_GOT_QS) {
			rcu_preempt_qs_record(cpu);
			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_GOT_QS;
		} else if (!(t->rcu_read_unlock_special &
			     RCU_READ_UNLOCK_NEED_QS)) {
			t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
		}
	}
}
/*
 * Process callbacks for preemptable RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_state,
				&__get_cpu_var(rcu_preempt_data));
}
/*
 * Queue a preemptable-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(call_rcu);
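/*
 * Illustrative usage sketch (an editorial addition, not part of the
 * original file): deferring the freeing of an RCU-protected structure
 * until after a grace period, so that pre-existing readers can finish.
 * A struct rcu_head is embedded in the protected structure and recovered
 * with container_of() in the callback.  The names "foo", "global_foo",
 * "free_foo_rcu", and "update_foo" are hypothetical.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *	struct foo *global_foo;
 *
 *	static void free_foo_rcu(struct rcu_head *head)
 *	{
 *		struct foo *p = container_of(head, struct foo, rcu);
 *
 *		kfree(p);
 *	}
 *
 *	static void update_foo(struct foo *new_p)
 *	{
 *		struct foo *old_p = global_foo;
 *
 *		rcu_assign_pointer(global_foo, new_p);
 *		if (old_p != NULL)
 *			call_rcu(&old_p->rcu, free_foo_rcu);
 *	}
 */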
/*
 * Check to see if there is any immediate preemptable-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
	return __rcu_pending(&rcu_preempt_state,
			     &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptable RCU need the CPU to stay out of dynticks mode?
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/*
 * Initialize preemptable RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
	rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Check for a task exiting while in a preemptable-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	rcu_read_unlock();
}
#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Tell them what RCU they are running.
 */
static inline void rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);
/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_qs(int cpu)
{
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

/*
 * Because preemptable RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_task_stall(struct rcu_node *rnp)
{
}

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/*
 * Because preemptable RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempted_readers(struct rcu_node *rnp)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptable RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections.
 */
static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
				      struct rcu_node *rnp)
{
}

/*
 * Because preemptable RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_offline_cpu(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to check.
 */
void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptable RCU does not exist, it never has any callbacks
 * to process.
 */
void rcu_preempt_process_callbacks(void)
{
}

/*
 * In classic RCU, call_rcu() is just call_rcu_sched().
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	call_rcu_sched(head, func);
}
EXPORT_SYMBOL_GPL(call_rcu);
/*
 * Because preemptable RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, it never needs any CPU.
 */
static int rcu_preempt_needs_cpu(int cpu)
{
	return 0;
}

/*
 * Because preemptable RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */