rcu: Rework detection of use of RCU by offline CPUs
kernel/rcutree_plugin.h
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *         Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>

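/*
 * SCHED_FIFO priority used for the various RCU kthreads below.
 * RCU_BOOST_PRIO is used for the boost kthreads and is taken from
 * CONFIG_RCU_BOOST_PRIO when RCU priority boosting is configured.
 */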
#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
        printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
        printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
               CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
        printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
        printk(KERN_INFO
               "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
        printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
        printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
        printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
#endif
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
        return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
        force_quiescent_state(&rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

        rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
        if (rdp->passed_quiesce == 0)
                trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
        rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
        struct task_struct *t = current;
        unsigned long flags;
        struct rcu_data *rdp;
        struct rcu_node *rnp;

        if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                t->rcu_blocked_node = rnp;

                /*
                 * If this CPU has already checked in, then this task
                 * will hold up the next grace period rather than the
                 * current grace period.  Queue the task accordingly.
                 * If the task is queued for the current grace period
                 * (i.e., this CPU has not yet passed through a quiescent
                 * state for the current grace period), then as long
                 * as that task remains queued, the current grace period
                 * cannot end.  Note that there is some uncertainty as
                 * to exactly when the current grace period started.
                 * We take a conservative approach, which can result
                 * in unnecessarily waiting on tasks that started very
                 * slightly after the current grace period began.  C'est
                 * la vie!!!
                 *
                 * But first, note that the current CPU must still be
                 * on line!
                 */
                WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
                WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                        rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
                        if (rnp->boost_tasks != NULL)
                                rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                } else {
                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
                                       ? rnp->gpnum
                                       : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special) {

                /*
                 * Complete exit from RCU read-side critical section on
                 * behalf of preempted instance of __rcu_read_unlock().
                 */
                rcu_read_unlock_special(t);
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.  Again, if we were in an RCU read-side critical
         * section, and if that critical section was blocking the current
         * grace period, then the fact that the task has been enqueued
         * means that we continue to block the current grace period.
         */
        local_irq_save(flags);
        rcu_preempt_qs(cpu);
        local_irq_restore(flags);
}

/*
 * Tree-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* needed if we ever invoke rcu_read_lock in rcutree.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

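/*
 * Illustrative sketch (not part of this file): a typical reader wraps
 * its traversal in rcu_read_lock()/rcu_read_unlock(), which for
 * TREE_PREEMPT_RCU map onto the primitives defined here.  Here gp and
 * do_something_with() are hypothetical:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */
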
/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;
        struct rcu_node *rnp_p;

        if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;  /* Still need more quiescent states! */
        }

        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
                 * Either there is only one rcu_node in the tree,
                 * or tasks were kicked up to root rcu_node due to
                 * CPUs going offline.
                 */
                rcu_report_qs_rsp(&rcu_preempt_state, flags);
                return;
        }

        /* Report up the rest of the hierarchy. */
        mask = rnp->grpmask;
        raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
        raw_spin_lock(&rnp_p->lock);    /* irqs already disabled. */
        rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning
 * NULL instead if the pointer is at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
                                             struct rcu_node *rnp)
{
        struct list_head *np;

        np = t->rcu_node_entry.next;
        if (np == &rnp->blkd_tasks)
                np = NULL;
        return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static noinline void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        int empty_exp_now;
        unsigned long flags;
        struct list_head *np;
#ifdef CONFIG_RCU_BOOST
        struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;

        /* NMI handlers cannot block and cannot safely manipulate state. */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit critical section,
         * let it know that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS) {
                rcu_preempt_qs(smp_processor_id());
        }

        /* Hardware IRQ handlers cannot block. */
        if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the list it blocked on.  The
                 * task can migrate while we acquire the lock, but at
                 * most one time.  So at most two passes through loop.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
                        raw_spin_lock(&rnp->lock);  /* irqs already disabled. */
                        if (rnp == t->rcu_blocked_node)
                                break;
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = !rcu_preempted_readers_exp(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task("rcu_preempt",
                                                rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
                /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
                if (t->rcu_boost_mutex) {
                        rbmp = t->rcu_boost_mutex;
                        t->rcu_boost_mutex = NULL;
                }
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
                 * so we must take a snapshot of the expedited state.
                 */
                empty_exp_now = !rcu_preempted_readers_exp(rnp);
                if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report("preempt_rcu",
                                                         rnp->gpnum,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
                                                         rnp->grphi,
                                                         !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
                } else
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);

#ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
                if (rbmp)
                        rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && empty_exp_now)
                        rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
        } else {
                local_irq_restore(flags);
        }
}

/*
 * Tree-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1)
                --t->rcu_read_lock_nesting;
        else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

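/*
 * Note on the INT_MIN dance above: temporarily setting
 * ->rcu_read_lock_nesting to a large negative value lets
 * rcu_preempt_note_context_switch() recognize a preempted instance of
 * the outermost __rcu_read_unlock() (nesting < 0) and complete the
 * cleanup on that task's behalf.
 */
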
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return;
        raw_spin_lock_irqsave(&rnp->lock, flags);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
        struct rcu_node *rnp = rcu_get_root(rsp);

        rcu_print_detail_task_stall_rnp(rnp);
        rcu_for_each_leaf_node(rsp, rnp)
                rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
        printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
               rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
        printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        struct task_struct *t;
        int ndetected = 0;

        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        rcu_print_task_stall_begin(rnp);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(KERN_CONT " P%d", t->pid);
                ndetected++;
        }
        rcu_print_task_stall_end();
        return ndetected;
}

/*
 * Suppress preemptible RCU's CPU stall warnings by pushing the
 * time of the next stall-warning message comfortably far into the
 * future.
 */
static void rcu_preempt_stall_reset(void)
{
        rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
        if (!list_empty(&rnp->blkd_tasks))
                rnp->gp_tasks = rnp->blkd_tasks.next;
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        struct list_head *lp;
        struct list_head *lp_root;
        int retval = 0;
        struct rcu_node *rnp_root = rcu_get_root(rsp);
        struct task_struct *t;

        if (rnp == rnp_root) {
                WARN_ONCE(1, "Last CPU thought to be offlined?");
                return 0;  /* Shouldn't happen: at least one CPU online. */
        }

        /* If we are on an internal node, complain bitterly. */
        WARN_ON_ONCE(rnp != rdp->mynode);

        /*
         * Move tasks up to root rcu_node.  Don't try to get fancy for
         * this corner-case operation -- just put this node's tasks
         * at the head of the root node's list, and update the root node's
         * ->gp_tasks and ->exp_tasks pointers to those of this node's,
         * if non-NULL.  This might result in waiting for more tasks than
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
        if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
        lp = &rnp->blkd_tasks;
        lp_root = &rnp_root->blkd_tasks;
        while (!list_empty(lp)) {
                t = list_entry(lp->next, typeof(*t), rcu_node_entry);
                raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                list_del(&t->rcu_node_entry);
                t->rcu_blocked_node = rnp_root;
                list_add(&t->rcu_node_entry, lp_root);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp_root->gp_tasks = rnp->gp_tasks;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
                raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
        }

#ifdef CONFIG_RCU_BOOST
        /* In case root is being boosted and leaf is not. */
        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
        if (rnp_root->boost_tasks != NULL &&
            rnp_root->boost_tasks != rnp_root->gp_tasks)
                rnp_root->boost_tasks = rnp_root->gp_tasks;
        raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

        rnp->gp_tasks = NULL;
        rnp->exp_tasks = NULL;
        return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Do CPU-offline processing for preemptible RCU.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
        rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0) {
                rcu_preempt_qs(cpu);
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
            per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        __rcu_process_callbacks(&rcu_preempt_state,
                                &__get_cpu_var(rcu_preempt_data));
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
        rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

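/*
 * Illustrative sketch (not part of this file): a call_rcu() user embeds
 * an rcu_head in its own structure and frees that structure from the
 * callback.  Here struct foo, foo_reclaim(), and fp are hypothetical:
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */
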
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
        wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

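/*
 * Illustrative sketch (not part of this file): a blocking updater
 * unlinks an element, waits for pre-existing readers, then frees it.
 * Here gp, gp_lock, oldp, and newp are hypothetical:
 *
 *	spin_lock(&gp_lock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	(wait for pre-existing readers)
 *	kfree(oldp);
 */
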
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
        return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
        return !rcu_preempted_readers_exp(rnp) &&
               ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
        unsigned long flags;
        unsigned long mask;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
                if (!sync_rcu_preempt_exp_done(rnp)) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        if (wake)
                                wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
        unsigned long flags;
        int must_wait = 0;

        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (list_empty(&rnp->blkd_tasks))
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
                rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
                must_wait = 1;
        }
        if (!must_wait)
                rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_preempt_state;
        long snap;
        int trycount = 0;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */

        /*
         * Acquire lock, falling back to synchronize_rcu() if too many
         * lock-acquisition failures.  Of course, if someone does the
         * expedited grace period for us, just leave.
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (trycount++ < 10)
                        udelay(trycount * num_online_cpus());
                else {
                        synchronize_rcu();
                        return;
                }
                if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                        goto mb_ret; /* Others did our work for us. */
        }
        if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
                goto unlock_mb_ret; /* Others did our work for us. */

        /* force all RCU readers onto ->blkd_tasks lists. */
        synchronize_sched_expedited();

        raw_spin_lock_irqsave(&rsp->onofflock, flags);

        /* Initialize ->expmask for all non-leaf rcu_node structures. */
        rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                rnp->expmask = rnp->qsmaskinit;
                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
        }

        /* Snapshot current state of ->blkd_tasks lists. */
        rcu_for_each_leaf_node(rsp, rnp)
                sync_rcu_preempt_exp_init(rsp, rnp);
        if (NUM_RCU_NODES > 1)
                sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);

        /* Wait for snapshotted ->blkd_tasks lists to drain. */
        rnp = rcu_get_root(rsp);
        wait_event(sync_rcu_preempt_exp_wq,
                   sync_rcu_preempt_exp_done(rnp));

        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
        ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
        smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

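/*
 * Note on the counter protocol above: sync_rcu_preempt_exp_count is
 * incremented once per completed expedited grace period.  Because the
 * snapshot is taken as the counter plus one, seeing the counter later
 * exceed the snapshot guarantees that a full expedited grace period
 * began and ended after this caller's initial memory barrier, doing
 * this caller's work for it.
 */
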
/*
 * Check to see if there is any immediate preemptible-RCU-related work
 * to be done.
 */
static int rcu_preempt_pending(int cpu)
{
        return __rcu_pending(&rcu_preempt_state,
                             &per_cpu(rcu_preempt_data, cpu));
}

/*
 * Does preemptible RCU have callbacks on this CPU?
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}

/**
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
        _rcu_barrier(&rcu_preempt_state, call_rcu);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

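/*
 * Note the distinction between the two waiting primitives above:
 * synchronize_rcu() waits only for pre-existing readers, while
 * rcu_barrier() waits for all previously queued call_rcu() callbacks
 * to be invoked.  Module-unload code needs the latter before text
 * containing callback functions can go away.
 */
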
/*
 * Initialize preemptible RCU's per-CPU data.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
        rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}

/*
 * Move preemptible RCU's callbacks from dying CPU to other online CPU
 * and record a quiescent state.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
        rcu_cleanup_dying_cpu(&rcu_preempt_state);
}

/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
        rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
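        /*
         * Force the nesting level to one and invoke the outermost
         * rcu_read_unlock(), which removes this task from any
         * ->blkd_tasks list it might still be queued on.
         */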
        t->rcu_read_lock_nesting = 1;
        __rcu_read_unlock();
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
        printk(KERN_INFO "Hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
        return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
        rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, there is no need to suppress
 * its CPU stall warnings.
 */
static void rcu_preempt_stall_reset(void)
{
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
        WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                                     struct rcu_node *rnp,
                                     struct rcu_data *rdp)
{
        return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never needs CPU-offline
 * processing.
 */
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
                    void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
        synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any work to do.
 */
static int rcu_preempt_pending(int cpu)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, it never has callbacks.
 */
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
        rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, there is no per-CPU
 * data to initialize.
 */
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}

/*
 * Because there is no preemptible RCU, there is no cleanup to do.
 */
static void rcu_preempt_cleanup_dying_cpu(void)
{
}

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
        if (list_empty(&rnp->blkd_tasks))
                rnp->n_balk_blkd_tasks++;
        else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
                rnp->n_balk_exp_gp_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
                rnp->n_balk_boost_tasks++;
        else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
                rnp->n_balk_notblocked++;
        else if (rnp->gp_tasks != NULL &&
                 ULONG_CMP_LT(jiffies, rnp->boost_time))
                rnp->n_balk_notyet++;
        else
                rnp->n_balk_nos++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
        unsigned long flags;
        struct rt_mutex mtx;
        struct task_struct *t;
        struct list_head *tb;

        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
                return 0;  /* Nothing left to boost. */

        raw_spin_lock_irqsave(&rnp->lock, flags);

        /*
         * Recheck under the lock: all tasks in need of boosting
         * might exit their RCU read-side critical sections on their own.
         */
        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return 0;
        }

        /*
         * Preferentially boost tasks blocking expedited grace periods.
         * This cannot starve the normal grace periods because a second
         * expedited grace period must boost all blocked tasks, including
         * those blocking the pre-existing normal grace period.
         */
        if (rnp->exp_tasks != NULL) {
                tb = rnp->exp_tasks;
                rnp->n_exp_boosts++;
        } else {
                tb = rnp->boost_tasks;
                rnp->n_normal_boosts++;
        }
        rnp->n_tasks_boosted++;

        /*
         * We boost task t by manufacturing an rt_mutex that appears to
         * be held by task t.  We leave a pointer to that rt_mutex where
         * task t can find it, and task t will release the mutex when it
         * exits its outermost RCU read-side critical section.  Then
         * simply acquiring this artificial rt_mutex will boost task
         * t's priority.  (Thanks to tglx for suggesting this approach!)
         *
         * Note that task t must acquire rnp->lock to remove itself from
         * the ->blkd_tasks list, which it will do from exit() if from
         * nowhere else.  We therefore are guaranteed that task t will
         * stay around at least until we drop rnp->lock.  Note that
         * rnp->lock also resolves races between our priority boosting
         * and task t's exiting its outermost RCU read-side critical
         * section.
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
               ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Timer handler to initiate waking up of boost kthreads that
 * have yielded the CPU due to excessive numbers of tasks to
 * boost.  We wake up the per-rcu_node kthread, which in turn
 * will wake up the booster kthread.
 */
static void rcu_boost_kthread_timer(unsigned long arg)
{
        invoke_rcu_node_kthread((struct rcu_node *)arg);
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
        struct rcu_node *rnp = (struct rcu_node *)arg;
        int spincnt = 0;
        int more2boost;

        trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
                trace_rcu_utilization("End boost kthread@rcu_wait");
                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
                trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
                        spincnt++;
                else
                        spincnt = 0;
                if (spincnt > 10) {
                        trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
                        trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        /* NOTREACHED */
        trace_rcu_utilization("End boost kthread@notreached");
        return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases,
 * but irqs remain disabled.  The ->boost_kthread_task is immortal,
 * so we don't need to worry about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
        struct task_struct *t;

        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                rnp->n_balk_exp_gp_tasks++;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        if (rnp->exp_tasks != NULL ||
            (rnp->gp_tasks != NULL &&
             rnp->boost_tasks == NULL &&
             rnp->qsmask == 0 &&
             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                t = rnp->boost_kthread_task;
                if (t != NULL)
                        wake_up_process(t);
        } else {
                rcu_initiate_boost_trace(rnp);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
        }
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
            current != __this_cpu_read(rcu_cpu_kthread_task))
                wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
        return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

/*
 * Set the affinity of the boost kthread.  The CPU-hotplug locks are
 * held, so no one should be messing with the existence of the boost
 * kthread.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
                                          cpumask_var_t cm)
{
        struct task_struct *t;

        t = rnp->boost_kthread_task;
        if (t != NULL)
                set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

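/*
 * For example, CONFIG_RCU_BOOST_DELAY=500 (milliseconds) with HZ=250
 * yields DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies between the start
 * of a grace period and the earliest possible priority boosting.
 */
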
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
        rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                                                 struct rcu_node *rnp,
                                                 int rnp_index)
{
        unsigned long flags;
        struct sched_param sp;
        struct task_struct *t;

        if (&rcu_preempt_state != rsp)
                return 0;
        rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
                           "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Stop the RCU's per-CPU kthread when its CPU goes offline.
 */
static void rcu_stop_cpu_kthread(int cpu)
{
        struct task_struct *t;

        /* Stop the CPU's kthread. */
        t = per_cpu(rcu_cpu_kthread_task, cpu);
        if (t != NULL) {
                per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
                kthread_stop(t);
        }
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_kthread_do_work(void)
{
        rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
        rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
        rcu_preempt_do_callbacks();
}

/*
 * Wake up the specified per-rcu_node-structure kthread.
 * Because the per-rcu_node kthreads are immortal, we don't need
 * to do anything to keep them alive.
 */
static void invoke_rcu_node_kthread(struct rcu_node *rnp)
{
        struct task_struct *t;

        t = rnp->node_kthread_task;
        if (t != NULL)
                wake_up_process(t);
}

/*
 * Set the specified CPU's kthread to run RT or not, as specified by
 * the to_rt argument.  The CPU-hotplug locks are held, so the task
 * is not going away.
 */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
        int policy;
        struct sched_param sp;
        struct task_struct *t;

        t = per_cpu(rcu_cpu_kthread_task, cpu);
        if (t == NULL)
                return;
        if (to_rt) {
                policy = SCHED_FIFO;
                sp.sched_priority = RCU_KTHREAD_PRIO;
        } else {
                policy = SCHED_NORMAL;
                sp.sched_priority = 0;
        }
        sched_setscheduler_nocheck(t, policy, &sp);
}

/*
 * Timer handler to initiate the waking up of per-CPU kthreads that
 * have yielded the CPU due to excess numbers of RCU callbacks.
 * We wake up the per-rcu_node kthread, which in turn will wake up
 * the booster kthread.
 */
static void rcu_cpu_kthread_timer(unsigned long arg)
{
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
        struct rcu_node *rnp = rdp->mynode;

        atomic_or(rdp->grpmask, &rnp->wakemask);
        invoke_rcu_node_kthread(rnp);
}

/*
 * Drop to non-real-time priority and yield, but only after posting a
 * timer that will cause us to regain our real-time priority if we
 * remain preempted.  Either way, we restore our real-time priority
 * before returning.
 */
static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
{
        struct sched_param sp;
        struct timer_list yield_timer;
        int prio = current->rt_priority;

        setup_timer_on_stack(&yield_timer, f, arg);
        mod_timer(&yield_timer, jiffies + 2);
        sp.sched_priority = 0;
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
        set_user_nice(current, 19);
        schedule();
        set_user_nice(current, 0);
        sp.sched_priority = prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
        del_timer(&yield_timer);
}

/*
 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
 * This can happen while the corresponding CPU is either coming online
 * or going offline.  We cannot wait until the CPU is fully online
 * before starting the kthread, because the various notifier functions
 * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
 * the corresponding CPU is online.
 *
 * Return 1 if the kthread needs to stop, 0 otherwise.
 *
 * Caller must disable bh.  This function can momentarily enable it.
 */
static int rcu_cpu_kthread_should_stop(int cpu)
{
        while (cpu_is_offline(cpu) ||
               !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
               smp_processor_id() != cpu) {
                if (kthread_should_stop())
                        return 1;
                per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
                per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
                local_bh_enable();
                schedule_timeout_uninterruptible(1);
                if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
                        set_cpus_allowed_ptr(current, cpumask_of(cpu));
                local_bh_disable();
        }
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        return 0;
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static int rcu_cpu_kthread(void *arg)
{
        int cpu = (int)(long)arg;
        unsigned long flags;
        int spincnt = 0;
        unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);

        trace_rcu_utilization("Start CPU kthread@init");
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
                trace_rcu_utilization("End CPU kthread@rcu_wait");
                rcu_wait(*workp != 0 || kthread_should_stop());
                trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
                        break;
                }
                *statusp = RCU_KTHREAD_RUNNING;
                per_cpu(rcu_cpu_kthread_loops, cpu)++;
                local_irq_save(flags);
                work = *workp;
                *workp = 0;
                local_irq_restore(flags);
                if (work)
                        rcu_kthread_do_work();
                local_bh_enable();
                if (*workp != 0)
                        spincnt++;
                else
                        spincnt = 0;
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
                        trace_rcu_utilization("End CPU kthread@rcu_yield");
                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
                        trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        *statusp = RCU_KTHREAD_STOPPED;
        trace_rcu_utilization("End CPU kthread@term");
        return 0;
}

/*
 * Spawn a per-CPU kthread, setting up affinity and priority.
 * Because the CPU hotplug lock is held, no other CPU will be attempting
 * to manipulate rcu_cpu_kthread_task.  There might be another CPU
 * attempting to access it during boot, but the locking in kthread_bind()
 * will enforce sufficient ordering.
 *
 * Please note that we cannot simply refuse to wake up the per-CPU
 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
 * which can result in softlockup complaints if the task ends up being
 * idle for more than a couple of minutes.
 *
 * However, please note also that we cannot bind the per-CPU kthread to its
 * CPU until that CPU is fully online.  We also cannot wait until the
 * CPU is fully online before we create its per-CPU kthread, as this would
 * deadlock the system when CPU notifiers tried waiting for grace
 * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
 * is online.  If its CPU is not yet fully online, then the code in
 * rcu_cpu_kthread() will wait until it is fully online, and then do
 * the binding.
 */
static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
{
        struct sched_param sp;
        struct task_struct *t;

        if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
        t = kthread_create_on_node(rcu_cpu_kthread,
                                   (void *)(long)cpu,
                                   cpu_to_node(cpu),
                                   "rcuc/%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))
                kthread_bind(t, cpu);
        per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
        WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
        sp.sched_priority = RCU_KTHREAD_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        per_cpu(rcu_cpu_kthread_task, cpu) = t;
        wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
        return 0;
}

/*
 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
 * kthreads when needed.  We ignore requests to wake up kthreads
 * for offline CPUs, which is OK because force_quiescent_state()
 * takes care of this case.
 */
static int rcu_node_kthread(void *arg)
{
        int cpu;
        unsigned long flags;
        unsigned long mask;
        struct rcu_node *rnp = (struct rcu_node *)arg;
        struct sched_param sp;
        struct task_struct *t;

        for (;;) {
                rnp->node_kthread_status = RCU_KTHREAD_WAITING;
                rcu_wait(atomic_read(&rnp->wakemask) != 0);
                rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                mask = atomic_xchg(&rnp->wakemask, 0);
                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
                        if ((mask & 0x1) == 0)
                                continue;
                        preempt_disable();
                        t = per_cpu(rcu_cpu_kthread_task, cpu);
                        if (!cpu_online(cpu) || t == NULL) {
                                preempt_enable();
                                continue;
                        }
                        per_cpu(rcu_cpu_has_work, cpu) = 1;
                        sp.sched_priority = RCU_KTHREAD_PRIO;
                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                        preempt_enable();
                }
        }
        /* NOTREACHED */
        rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
        return 0;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
        cpumask_var_t cm;
        int cpu;
        unsigned long mask = rnp->qsmaskinit;

        if (rnp->node_kthread_task == NULL)
                return;
        if (!alloc_cpumask_var(&cm, GFP_KERNEL))
                return;
        cpumask_clear(cm);
        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
                if ((mask & 0x1) && cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
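        /*
         * If no online CPUs covered by this rcu_node remain, allow the
         * kthread to run on any CPU outside this rcu_node's range.
         */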
        if (cpumask_weight(cm) == 0) {
                cpumask_setall(cm);
                for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
                        cpumask_clear_cpu(cpu, cm);
                WARN_ON_ONCE(cpumask_weight(cm) == 0);
        }
        set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
        rcu_boost_kthread_setaffinity(rnp, cm);
        free_cpumask_var(cm);
}

/*
 * Spawn a per-rcu_node kthread, setting priority and affinity.
 * Called during boot before online/offline can happen, or, if
 * during runtime, with the main CPU-hotplug locks held.  So only
 * one of these can be executing at a time.
 */
static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                                                struct rcu_node *rnp)
{
        unsigned long flags;
        int rnp_index = rnp - &rsp->node[0];
        struct sched_param sp;
        struct task_struct *t;

        if (!rcu_scheduler_fully_active ||
            rnp->qsmaskinit == 0)
                return 0;
        if (rnp->node_kthread_task == NULL) {
                t = kthread_create(rcu_node_kthread, (void *)rnp,
                                   "rcun/%d", rnp_index);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                raw_spin_lock_irqsave(&rnp->lock, flags);
                rnp->node_kthread_task = t;
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                sp.sched_priority = 99;
                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        }
        return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
}

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
        int cpu;
        struct rcu_node *rnp;

        rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
                        (void)rcu_spawn_one_cpu_kthread(cpu);
        }
        rnp = rcu_get_root(rcu_state);
        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        if (NUM_RCU_NODES > 1) {
                rcu_for_each_leaf_node(rcu_state, rnp)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        }
        return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;

        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
        if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
                if (rnp->node_kthread_task == NULL)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
        }
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
        WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
        return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

#ifdef CONFIG_HOTPLUG_CPU

static void rcu_stop_cpu_kthread(int cpu)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
{
}

static int __init rcu_scheduler_really_started(void)
{
        rcu_scheduler_fully_active = 1;
        return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu)
{
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}
#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following four preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	few times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 6		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */
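
/*
 * Worked example (editorial; HZ value assumed): these delays are
 * expressed in jiffies, so their wall-clock durations depend on
 * CONFIG_HZ.  With HZ=1000, RCU_IDLE_GP_DELAY is 6 jiffies == 6
 * milliseconds; with HZ=100 it is 60 milliseconds.  RCU_IDLE_LAZY_GP_DELAY
 * is 6 * HZ jiffies, which is six seconds regardless of HZ, matching
 * the "Roughly six seconds" comment above.
 */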
static DEFINE_PER_CPU(int, rcu_dyntick_drain);
static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
static ktime_t rcu_idle_gp_wait;	/* If some non-lazy callbacks. */
static ktime_t rcu_idle_lazy_gp_wait;	/* If only lazy callbacks. */
/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 */
int rcu_needs_cpu(int cpu)
{
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu))
		return 0;
	/* Otherwise, RCU needs the CPU only if it recently tried and failed. */
	return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
}
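
/*
 * Worked example (editorial): if rcu_prepare_for_idle() gave up and
 * recorded the then-current jiffies value in rcu_dyntick_holdoff,
 * rcu_needs_cpu() returns 1 for the rest of that jiffy, so the
 * scheduling-clock tick stays on.  One tick later the comparison
 * with jiffies fails and the CPU may again attempt dyntick-idle entry.
 */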
/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}
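
/*
 * Editorial note (hedged): ->qlen counts all callbacks queued on this
 * rcu_data structure, while ->qlen_lazy counts the subset known only
 * to free memory, for example callbacks posted via kfree_rcu().  So
 * qlen != qlen_lazy means at least one callback needs to run soon,
 * which is why such CPUs get the shorter RCU_IDLE_GP_DELAY timeout.
 */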
#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return false;
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}
/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 */
static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
{
	trace_rcu_prep_idle("Timer");
	return HRTIMER_NORESTART;
}
/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	static int firsttime = 1;
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtp->function = rcu_idle_gp_timer_func;
	if (firsttime) {
		unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);

		rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
		upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
		rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
		firsttime = 0;
	}
}
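
/*
 * Worked example (editorial, assuming HZ=1000): the first call computes
 * upj = jiffies_to_usecs(6) == 6000, so rcu_idle_gp_wait becomes
 * ns_to_ktime(6000 * 1000), i.e. 6 ms.  The "firsttime" guard means
 * these two ktime values are computed only once, when the first CPU
 * is initialized, since they are identical for every CPU.
 */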
/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
}
/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the rcu_dyntick_drain sequencing. */
	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* First time through, initialize the counter. */
		per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
	} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu)) {
		/* Can we go dyntick-idle despite still having callbacks? */
		trace_rcu_prep_idle("Dyntick with callbacks");
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu))
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
		else
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
		return; /* Nothing more to do immediately. */
	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
		/* We have hit the limit, so time to give up. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state, 0);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state, 0);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state, 0);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else
		trace_rcu_prep_idle("Callbacks drained");
}
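
/*
 * Editorial walkthrough (hedged): with the default tunings above, a
 * CPU with queued callbacks that keeps re-entering idle proceeds as
 * follows.  The first pass sets rcu_dyntick_drain to RCU_IDLE_FLUSHES
 * (5) and pushes callbacks; passes with the counter still above
 * RCU_IDLE_OPT_FLUSHES (3) remain mandatory.  Once the counter is at
 * or below 3 and rcu_pending() is clear, the CPU enters dyntick-idle
 * anyway, with the hrtimer armed as a backstop.  If the counter
 * instead decrements to zero, the CPU enters a one-jiffy holdoff and
 * keeps its scheduling-clock tick.
 */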
#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);

	sprintf(cp, "drain=%d %c timer=%lld",
		per_cpu(rcu_dyntick_drain, cpu),
		per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
		hrtimer_active(hrtp)
			? ktime_to_us(hrtimer_get_remaining(hrtp))
			: -1);
}
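
/*
 * Editorial example (hedged, values assumed): the resulting string
 * might read "drain=0 . timer=5970", meaning that the drain sequence
 * is not in progress, this CPU is not in holdoff ('.' rather than
 * 'H'), and the idle grace-period timer fires in about 5970
 * microseconds.  An inactive timer prints as "timer=-1".
 */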
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';	/* Terminate the buffer so the caller can print it. */
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling-clock
 * interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}
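
/*
 * Editorial example (hedged, hypothetical values): a resulting
 * stall-report line might look like
 *
 *	3: (5 GPs behind) idle=02f/0/0 drain=0 . timer=-1
 *
 * naming CPU 3, five grace periods out of date, followed by its
 * dynticks counters and, under CONFIG_RCU_FAST_NO_HZ, its idle-entry
 * state.
 */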
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}
/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}
/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	__get_cpu_var(rcu_sched_data).ticks_this_gp++;
	__get_cpu_var(rcu_bh_data).ticks_this_gp++;
#ifdef CONFIG_TREE_PREEMPT_RCU
	__get_cpu_var(rcu_preempt_data).ticks_this_gp++;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
}
#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT " {");
}

static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	printk(KERN_CONT " %d", cpu);
}

static void print_cpu_stall_info_end(void)
{
	printk(KERN_CONT "} ");
}

static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
}

static void increment_cpu_stall_ticks(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */