/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/delay.h>
#include <linux/oom.h>
#include <linux/smpboot.h>

#define RCU_KTHREAD_PRIO 1

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
#endif

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.  If you like #ifdef, you
 * will love this function.
 */
static void __init rcu_bootup_announce_oddness(void)
{
#ifdef CONFIG_RCU_TRACE
	printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
#endif
#if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
	printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
	       CONFIG_RCU_FANOUT);
#endif
#ifdef CONFIG_RCU_FANOUT_EXACT
	printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
#endif
#ifdef CONFIG_RCU_FAST_NO_HZ
	printk(KERN_INFO
	       "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
#endif
#ifdef CONFIG_PROVE_RCU
	printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
#endif
#ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
	printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
#endif
#if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
	printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
#endif
#if defined(CONFIG_RCU_CPU_STALL_INFO)
	printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
	printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
	if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
		printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n",
		       rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n",
		       NR_CPUS, nr_cpu_ids);
}

#ifdef CONFIG_TREE_PREEMPT_RCU

struct rcu_state rcu_preempt_state =
	RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;

static int rcu_preempted_readers_exp(struct rcu_node *rnp);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU-preempt batches processed thus far
 * for debug and statistics.
 */
long rcu_batches_completed_preempt(void)
{
	return rcu_preempt_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_preempt();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for preemptible RCU.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 */
static void rcu_preempt_qs(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	if (rdp->passed_quiesce == 0)
		trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
	rdp->passed_quiesce = 1;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
	struct task_struct *t = current;
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	if (t->rcu_read_lock_nesting > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
		t->rcu_blocked_node = rnp;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long
		 * as that task remains queued, the current grace period
		 * cannot end.  Note that there is some uncertainty as
		 * to exactly when the current grace period started.
		 * We take a conservative approach, which can result
		 * in unnecessarily waiting on tasks that started very
		 * slightly after the current grace period began.  C'est
		 * la vie!!!
		 *
		 * But first, note that the current CPU must still be
		 * on line!
		 */
		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
			list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
			rnp->gp_tasks = &t->rcu_node_entry;
#ifdef CONFIG_RCU_BOOST
			if (rnp->boost_tasks != NULL)
				rnp->boost_tasks = rnp->gp_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		} else {
			list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
			if (rnp->qsmask & rdp->grpmask)
				rnp->gp_tasks = &t->rcu_node_entry;
		}
		trace_rcu_preempt_task(rdp->rsp->name, t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gpnum : rnp->gpnum + 1);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else if (t->rcu_read_lock_nesting < 0 &&
		   t->rcu_read_unlock_special) {

		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	local_irq_save(flags);
	rcu_preempt_qs(cpu);
	local_irq_restore(flags);
}

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return rnp->gp_tasks != NULL;
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period.  The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;	/* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Either there is only one rcu_node in the tree,
		 * or tasks were kicked up to root rcu_node due to
		 * CPUs going offline.
		 */
		rcu_report_qs_rsp(&rcu_preempt_state, flags);
		return;
	}

	/* Report up the rest of the hierarchy. */
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	int empty_exp_now;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct rcu_node *rnp;
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit critical section,
	 * let it know that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_qs(smp_processor_id());

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the list it blocked on.  The
		 * task can migrate while we acquire the lock, but at
		 * most one time.  So at most two passes through loop.
		 */
		for (;;) {
			rnp = t->rcu_blocked_node;
			raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
			if (rnp == t->rcu_blocked_node)
				break;
			raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
		}
		empty = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = !rcu_preempted_readers_exp(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task("rcu_preempt",
						rnp->gpnum, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp->gp_tasks = np;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp->exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp->boost_tasks = np;
		/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
		if (t->rcu_boost_mutex) {
			rbmp = t->rcu_boost_mutex;
			t->rcu_boost_mutex = NULL;
		}
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
		if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report("preempt_rcu",
							 rnp->gpnum,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}

#ifdef CONFIG_RCU_BOOST
		/* Unboost if we were boosted. */
		if (rbmp)
			rt_mutex_unlock(rbmp);
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	} else {
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_VERBOSE

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
		sched_show_task(t);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_print_detail_task_stall_rnp(rnp);
	rcu_for_each_leaf_node(rsp, rnp)
		rcu_print_detail_task_stall_rnp(rnp);
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */

#ifdef CONFIG_RCU_CPU_STALL_INFO

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
	printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
}

static void rcu_print_task_stall_end(void)
{
	printk(KERN_CONT "\n");
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */

static void rcu_print_task_stall_begin(struct rcu_node *rnp)
{
}

static void rcu_print_task_stall_end(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rcu_preempt_blocked_readers_cgp(rnp))
		return 0;
	rcu_print_task_stall_begin(rnp);
	t = list_entry(rnp->gp_tasks,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		printk(KERN_CONT " P%d", t->pid);
		ndetected++;
	}
	rcu_print_task_stall_end();
	return ndetected;
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
 * must be held by the caller.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
	if (!list_empty(&rnp->blkd_tasks))
		rnp->gp_tasks = rnp->blkd_tasks.next;
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Handle tasklist migration for case in which all CPUs covered by the
 * specified rcu_node have gone offline.  Move them up to the root
 * rcu_node.  The reason for not just moving them to the immediate
 * parent is to remove the need for rcu_read_unlock_special() to
 * make more than two attempts to acquire the target rcu_node's lock.
 *
 * Returns 1 if there was previously a task blocking the current grace
 * period on the specified rcu_node structure.
 *
 * The caller must hold rnp->lock with irqs disabled.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	struct list_head *lp;
	struct list_head *lp_root;
	int retval = 0;
	struct rcu_node *rnp_root = rcu_get_root(rsp);
	struct task_struct *t;

	if (rnp == rnp_root) {
		WARN_ONCE(1, "Last CPU thought to be offlined?");
		return 0;  /* Shouldn't happen: at least one CPU online. */
	}

	/* If we are on an internal node, complain bitterly. */
	WARN_ON_ONCE(rnp != rdp->mynode);

	/*
	 * Move tasks up to root rcu_node.  Don't try to get fancy for
	 * this corner-case operation -- just put this node's tasks
	 * at the head of the root node's list, and update the root node's
	 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
	 * if non-NULL.  This might result in waiting for more tasks than
	 * absolutely necessary, but this is a good performance/complexity
	 * tradeoff.
	 */
	if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
		retval |= RCU_OFL_TASKS_NORM_GP;
	if (rcu_preempted_readers_exp(rnp))
		retval |= RCU_OFL_TASKS_EXP_GP;
	lp = &rnp->blkd_tasks;
	lp_root = &rnp_root->blkd_tasks;
	while (!list_empty(lp)) {
		t = list_entry(lp->next, typeof(*t), rcu_node_entry);
		raw_spin_lock(&rnp_root->lock);	/* irqs already disabled */
		list_del(&t->rcu_node_entry);
		t->rcu_blocked_node = rnp_root;
		list_add(&t->rcu_node_entry, lp_root);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			rnp_root->gp_tasks = rnp->gp_tasks;
		if (&t->rcu_node_entry == rnp->exp_tasks)
			rnp_root->exp_tasks = rnp->exp_tasks;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rnp->boost_tasks)
			rnp_root->boost_tasks = rnp->boost_tasks;
#endif /* #ifdef CONFIG_RCU_BOOST */
		raw_spin_unlock(&rnp_root->lock);	/* irqs still disabled */
	}

	rnp->gp_tasks = NULL;
	rnp->exp_tasks = NULL;
#ifdef CONFIG_RCU_BOOST
	rnp->boost_tasks = NULL;
	/*
	 * In case root is being boosted and leaf was not.  Make sure
	 * that we boost the tasks blocking the current grace period
	 * in this case.
	 */
	raw_spin_lock(&rnp_root->lock);	/* irqs already disabled */
	if (rnp_root->boost_tasks != NULL &&
	    rnp_root->boost_tasks != rnp_root->gp_tasks &&
	    rnp_root->boost_tasks != rnp_root->exp_tasks)
		rnp_root->boost_tasks = rnp_root->gp_tasks;
	raw_spin_unlock(&rnp_root->lock);	/* irqs still disabled */
#endif /* #ifdef CONFIG_RCU_BOOST */

	return retval;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Check for a quiescent state from the current CPU.  When a task blocks,
 * the task is recorded in the corresponding CPU's rcu_node structure,
 * which is checked elsewhere.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0) {
		rcu_preempt_qs(cpu);
		return;
	}
	if (t->rcu_read_lock_nesting > 0 &&
	    per_cpu(rcu_preempt_data, cpu).qs_pending)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

#ifdef CONFIG_RCU_BOOST

static void rcu_preempt_do_callbacks(void)
{
	rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 0);
}
EXPORT_SYMBOL_GPL(call_rcu);

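/*
 * Illustrative call_rcu() usage from updater code (hypothetical types and
 * names, not part of this file): remove an element from an RCU-protected
 * list, then defer freeing it until a grace period has elapsed.
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_rcu(&p->rcu, foo_reclaim);
 */
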
/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_preempt_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

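/*
 * Note that callers normally reach this function through the kfree_rcu()
 * wrapper rather than invoking it directly, for example (hypothetical
 * structure "p" with an embedded rcu_head named "rcu"):
 *
 *	kfree_rcu(p, rcu);
 */
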
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  Note, however, that
 * upon return from synchronize_rcu(), the caller might well be executing
 * concurrently with new RCU read-side critical sections that began while
 * synchronize_rcu() was waiting.  RCU read-side critical sections are
 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");
	if (!rcu_scheduler_active)
		return;
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

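/*
 * Typical updater-side pattern built on synchronize_rcu() (illustrative
 * sketch using a hypothetical RCU-protected pointer "gp"):
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu();	// wait for pre-existing readers to finish
 *	kfree(old);		// now safe: no reader can still see "old"
 */
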
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current preemptible-RCU expedited grace period.
 * If there is no preemptible-RCU expedited grace period currently in
 * progress, returns zero unconditionally.
 */
static int rcu_preempted_readers_exp(struct rcu_node *rnp)
{
	return rnp->exp_tasks != NULL;
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	return !rcu_preempted_readers_exp(rnp) &&
	       ACCESS_ONCE(rnp->expmask) == 0;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Most callers will set the "wake" flag, but the task initiating the
 * expedited grace period need not wake itself.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
	unsigned long flags;
	unsigned long mask;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			if (wake)
				wake_up(&sync_rcu_preempt_exp_wq);
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock(&rnp->lock);	/* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock(&rnp->lock);	/* irqs already disabled */
		rnp->expmask &= ~mask;
	}
}

/*
 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
 * grace period for the specified rcu_node structure.  If there are no such
 * tasks, report it up the rcu_node hierarchy.
 *
 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
 */
static void
sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
{
	unsigned long flags;
	int must_wait = 0;

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (list_empty(&rnp->blkd_tasks)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rnp->exp_tasks = rnp->blkd_tasks.next;
		rcu_initiate_boost(rnp, flags);	/* releases rnp->lock */
		must_wait = 1;
	}
	if (!must_wait)
		rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
}

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU-preempt grace period, but expedite it.  The basic
 * idea is to invoke synchronize_sched_expedited() to push all the tasks to
 * the ->blkd_tasks lists and wait for this list to drain.  This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code.
 * In fact, if you are using synchronize_rcu_expedited() in a loop,
 * please restructure your code to batch your updates, and then use a
 * single synchronize_rcu() instead.
 *
 * Note that it is illegal to call this function while holding any lock
 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
 * to call this function from a CPU-hotplug notifier.  Failing to observe
 * these restrictions will result in deadlock.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_node *rnp;
	struct rcu_state *rsp = &rcu_preempt_state;
	unsigned long snap;
	int trycount = 0;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
	smp_mb(); /* Above access cannot bleed into critical section. */

	/*
	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
	 * operation that finds an rcu_node structure with tasks in the
	 * process of being boosted will know that all tasks blocking
	 * this expedited grace period will already be in the process of
	 * being boosted.  This simplifies the process of moving tasks
	 * from leaf to root rcu_node structures.
	 */
	get_online_cpus();

	/*
	 * Acquire lock, falling back to synchronize_rcu() if too many
	 * lock-acquisition failures.  Of course, if someone does the
	 * expedited grace period for us, just leave.
	 */
	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
		if (ULONG_CMP_LT(snap,
		    ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
			put_online_cpus();
			goto mb_ret; /* Others did our work for us. */
		}
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			put_online_cpus();
			synchronize_rcu();
			return;
		}
	}
	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
		put_online_cpus();
		goto unlock_mb_ret; /* Others did our work for us. */
	}

	/* force all RCU readers onto ->blkd_tasks lists. */
	synchronize_sched_expedited();

	/* Initialize ->expmask for all non-leaf rcu_node structures. */
	rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rnp->expmask = rnp->qsmaskinit;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}

	/* Snapshot current state of ->blkd_tasks lists. */
	rcu_for_each_leaf_node(rsp, rnp)
		sync_rcu_preempt_exp_init(rsp, rnp);
	if (NUM_RCU_NODES > 1)
		sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));

	put_online_cpus();

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);
	wait_event(sync_rcu_preempt_exp_wq,
		   sync_rcu_preempt_exp_done(rnp));

	/* Clean up and exit. */
	smp_mb(); /* ensure expedited GP seen before counter increment. */
	ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
mb_ret:
	smp_mb(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

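/*
 * Illustrative restatement of the batching advice above: rather than
 *
 *	for (i = 0; i < n; i++) {
 *		update_element(i);		// hypothetical per-item update
 *		synchronize_rcu_expedited();
 *	}
 *
 * perform all n updates first and then wait once, preferably with a
 * single synchronize_rcu().
 */
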
/*
 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
 */
void rcu_barrier(void)
{
	_rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

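/*
 * Illustrative use: a module that posts call_rcu() callbacks must wait for
 * them before its text and data disappear, so a hypothetical exit handler
 * would do something like:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		// ...prevent new call_rcu() invocations...
 *		rcu_barrier();		// wait for outstanding callbacks
 *		// ...now safe to free data used by those callbacks...
 *	}
 */
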
/*
 * Initialize preemptible RCU's state structures.
 */
static void __init __rcu_init_preempt(void)
{
	rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static struct rcu_state *rcu_state = &rcu_sched_state;

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	printk(KERN_INFO "Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Return the number of RCU batches processed thus far for debug & stats.
 */
long rcu_batches_completed(void)
{
	return rcu_batches_completed_sched();
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Force a quiescent state for RCU, which, because there is no preemptible
 * RCU, becomes the same as rcu-sched.
 */
void rcu_force_quiescent_state(void)
{
	rcu_sched_force_quiescent_state();
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
static void rcu_preempt_note_context_switch(int cpu)
{
}

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/* Because preemptible RCU does not exist, no quieting of tasks. */
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall(struct rcu_state *rsp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, it never needs to migrate
 * tasks that were blocked within RCU read-side critical sections, and
 * such non-existent tasks cannot possibly have been blocking the current
 * grace period.
 */
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp)
{
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(int cpu)
{
}

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks.  Until then, this
 * function may only be called from __kfree_rcu().
 *
 * Because there is no preemptible RCU, we use RCU-sched instead.
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

/*
 * Wait for an rcu-preempt grace period, but make it happen quickly.
 * But because preemptible RCU does not exist, map to rcu-sched.
 */
void synchronize_rcu_expedited(void)
{
	synchronize_sched_expedited();
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Because preemptible RCU does not exist, there is never any need to
 * report on tasks preempted in RCU read-side critical sections during
 * expedited RCU grace periods.
 */
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
			       bool wake)
{
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Because preemptible RCU does not exist, rcu_barrier() is just
 * another name for rcu_barrier_sched().
 */
void rcu_barrier(void)
{
	rcu_barrier_sched();
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Because preemptible RCU does not exist, it need not be initialized.
 */
static void __init __rcu_init_preempt(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

#ifdef CONFIG_RCU_TRACE

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
	if (list_empty(&rnp->blkd_tasks))
		rnp->n_balk_blkd_tasks++;
	else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
		rnp->n_balk_exp_gp_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
		rnp->n_balk_boost_tasks++;
	else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
		rnp->n_balk_notblocked++;
	else if (rnp->gp_tasks != NULL &&
		 ULONG_CMP_LT(jiffies, rnp->boost_time))
		rnp->n_balk_notyet++;
}

#else /* #ifdef CONFIG_RCU_TRACE */

static void rcu_initiate_boost_trace(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_RCU_TRACE */

static void rcu_wake_cond(struct task_struct *t, int status)
{
	/*
	 * If the thread is yielding, only wake it when this
	 * is invoked from idle.
	 */
	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
		wake_up_process(t);
}

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave(&rnp->lock, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL) {
		tb = rnp->exp_tasks;
		rnp->n_exp_boosts++;
	} else {
		tb = rnp->boost_tasks;
		rnp->n_normal_boosts++;
	}
	rnp->n_tasks_boosted++;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	rt_mutex_lock(&mtx);	/* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);	/* Keep lockdep happy. */

	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread.  One per leaf rcu_node and one for the
 * root rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization("Start boost kthread@init");
	for (;;) {
		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
		trace_rcu_utilization("End boost kthread@rcu_wait");
		rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
		trace_rcu_utilization("Start boost kthread@rcu_wait");
		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
			trace_rcu_utilization("End boost kthread@rcu_yield");
			schedule_timeout_interruptible(2);
			trace_rcu_utilization("Start boost kthread@rcu_yield");
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization("End boost kthread@notreached");
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	struct task_struct *t;

	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		rnp->n_balk_exp_gp_tasks++;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rnp->boost_time))) {
		if (rnp->exp_tasks == NULL)
			rnp->boost_tasks = rnp->gp_tasks;
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		t = rnp->boost_kthread_task;
		if (t)
			rcu_wake_cond(t, rnp->boost_kthread_status);
	} else {
		rcu_initiate_boost_trace(rnp);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

/*
 * Wake up the per-CPU kthread to invoke RCU callbacks.
 */
static void invoke_rcu_callbacks_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__this_cpu_write(rcu_cpu_has_work, 1);
	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
			      __this_cpu_read(rcu_cpu_kthread_status));
	}
	local_irq_restore(flags);
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __get_cpu_var(rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

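/*
 * Worked example of the definition above: assuming CONFIG_RCU_BOOST_DELAY=500
 * and HZ=1000, RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 1000, 1000) = 500
 * jiffies, so boosting is not attempted until roughly 500 milliseconds after
 * the start of the grace period.
 */
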
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp)
{
	int rnp_index = rnp - &rsp->node[0];
	unsigned long flags;
	struct sched_param sp;
	struct task_struct *t;

	if (&rcu_preempt_state != rsp)
		return 0;

	if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0)
		return 0;

	if (rnp->boost_kthread_task != NULL)
		return 0;
	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (IS_ERR(t))
		return PTR_ERR(t);
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
	return 0;
}

static void rcu_kthread_do_work(void)
{
	rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
	rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
	rcu_preempt_do_callbacks();
}

static void rcu_cpu_kthread_setup(unsigned int cpu)
{
	struct sched_param sp;

	sp.sched_priority = RCU_KTHREAD_PRIO;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
}

static void rcu_cpu_kthread_park(unsigned int cpu)
{
	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
}

static int rcu_cpu_kthread_should_run(unsigned int cpu)
{
	return __get_cpu_var(rcu_cpu_has_work);
}

/*
 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
 * RCU softirq used in flavors and configurations of RCU that do not
 * support RCU priority boosting.
 */
static void rcu_cpu_kthread(unsigned int cpu)
{
	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
	int spincnt;

	for (spincnt = 0; spincnt < 10; spincnt++) {
		trace_rcu_utilization("Start CPU kthread@rcu_wait");
		local_bh_disable();
		*statusp = RCU_KTHREAD_RUNNING;
		this_cpu_inc(rcu_cpu_kthread_loops);
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_kthread_do_work();
		local_bh_enable();
		if (*workp == 0) {
			trace_rcu_utilization("End CPU kthread@rcu_wait");
			*statusp = RCU_KTHREAD_WAITING;
			return;
		}
	}
	*statusp = RCU_KTHREAD_YIELDING;
	trace_rcu_utilization("Start CPU kthread@rcu_yield");
	schedule_timeout_interruptible(2);
	trace_rcu_utilization("End CPU kthread@rcu_yield");
	*statusp = RCU_KTHREAD_WAITING;
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rnp->qsmaskinit;
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
		if ((mask & 0x1) && cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0) {
		cpumask_setall(cm);
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
			cpumask_clear_cpu(cpu, cm);
		WARN_ON_ONCE(cpumask_weight(cm) == 0);
	}
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

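/*
 * Illustrative affinity computation (hypothetical numbers): for a leaf
 * rcu_node with ->grplo=0, ->grphi=15, ->qsmaskinit covering CPUs 0-15,
 * and outgoingcpu=3, the loop above builds the mask {0-2, 4-15}.  Only if
 * the resulting mask would be empty does the fallback path instead allow
 * the kthread to run on any CPU outside the 0-15 range.
 */
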
static struct smp_hotplug_thread rcu_cpu_thread_spec = {
	.store			= &rcu_cpu_kthread_task,
	.thread_should_run	= rcu_cpu_kthread_should_run,
	.thread_fn		= rcu_cpu_kthread,
	.thread_comm		= "rcuc/%u",
	.setup			= rcu_cpu_kthread_setup,
	.park			= rcu_cpu_kthread_park,
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
	rnp = rcu_get_root(rcu_state);
	(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	if (NUM_RCU_NODES > 1) {
		rcu_for_each_leaf_node(rcu_state, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;

	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
	if (rcu_scheduler_fully_active)
		(void)rcu_spawn_one_boost_kthread(rcu_state, rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void invoke_rcu_callbacks_kthread(void)
{
	WARN_ON_ONCE(1);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void __cpuinit rcu_prepare_kthreads(int cpu)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
 * any flavor of RCU.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_cpu_has_callbacks(cpu);
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(int cpu)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(int cpu)
{
}

/*
 * Don't bother keeping a running count of the number of RCU callbacks
 * posted because CONFIG_RCU_FAST_NO_HZ=n.
 */
static void rcu_idle_count_callbacks_posted(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.  This is handled by a
 * state machine implemented by rcu_prepare_for_idle() below.
 *
 * The following preprocessor symbols control this state machine:
 *
 * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
 *	to satisfy RCU.  Beyond this point, it is better to incur a periodic
 *	scheduling-clock interrupt than to loop through the state machine
 *	at full power.
 * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
 *	optional if RCU does not need anything immediately from this
 *	CPU, even if this CPU still has RCU callbacks queued.  The first
 *	times through the state machine are mandatory: we need to give
 *	the state machine a chance to communicate a quiescent state
 *	to the RCU core.
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
 *	permitted to sleep in dyntick-idle mode with only lazy RCU
 *	callbacks pending.  Setting this too high can OOM your system.
 *
 * The values below work well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_FLUSHES 5		/* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3		/* Optional dyntick-idle tries. */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)	/* Roughly six seconds. */

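/*
 * Worked example of the above (assuming HZ=1000): a CPU entering
 * dyntick-idle mode with non-lazy callbacks pending may sleep for about
 * RCU_IDLE_GP_DELAY = 4 jiffies (~4ms, roughly one grace period), while a
 * CPU with only lazy callbacks may sleep for about RCU_IDLE_LAZY_GP_DELAY
 * = 6 * HZ = 6000 jiffies (~6 seconds).
 */
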
extern int tick_nohz_enabled;

/*
 * Does the specified flavor of RCU have non-lazy callbacks pending on
 * the specified CPU?  Both RCU flavor and CPU are specified by the
 * rcu_data structure.
 */
static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
{
	return rdp->qlen != rdp->qlen_lazy;
}

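/*
 * For example, ->qlen == 10 with ->qlen_lazy == 10 means that all ten
 * pending callbacks are lazy (kfree-only), so this returns false, whereas
 * ->qlen == 10 with ->qlen_lazy == 7 means that three callbacks need a
 * prompt grace period, so this returns true.
 */
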
#ifdef CONFIG_TREE_PREEMPT_RCU

/*
 * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
 * is no RCU-preempt in the kernel.)
 */
static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);

	return __rcu_cpu_has_nonlazy_callbacks(rdp);
}

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
{
	return 0;
}

#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */

/*
 * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
 */
static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
{
	return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
	       __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
	       rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
}

/*
 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
 * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
 * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
 * it is better to incur scheduling-clock interrupts than to spin
 * continuously for the same time duration!
 *
 * The delta_jiffies argument is used to store the time when RCU is
 * going to need the CPU again if it still has callbacks.  The reason
 * for this is that rcu_prepare_for_idle() might need to post a timer,
 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
 * the wakeup time for this CPU.  This means that RCU's timer can be
 * delayed until the wakeup time, which defeats the purpose of posting
 * a timer.
 */
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	/* Flag a new idle sojourn to the idle-entry state machine. */
	rdtp->idle_first_pass = 1;
	/* If no callbacks, RCU doesn't need the CPU. */
	if (!rcu_cpu_has_callbacks(cpu)) {
		*delta_jiffies = ULONG_MAX;
		return 0;
	}
	if (rdtp->dyntick_holdoff == jiffies) {
		/* RCU recently tried and failed, so don't try again. */
		*delta_jiffies = 1;
		return 1;
	}
	/* Set up for the possibility that RCU will post a timer. */
	if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
		*delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
					  RCU_IDLE_GP_DELAY) - jiffies;
	} else {
		*delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
		*delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
	}
	return 0;
}

/*
 * Handler for smp_call_function_single().  The only point of this
 * handler is to wake the CPU up, so the handler does only tracing.
 */
void rcu_idle_demigrate(void *unused)
{
	trace_rcu_prep_idle("Demigrate");
}

/*
 * Timer handler used to force CPU to start pushing its remaining RCU
 * callbacks in the case where it entered dyntick-idle mode with callbacks
 * pending.  The handler doesn't really need to do anything because the
 * real work is done upon re-entry to idle, or by the next scheduling-clock
 * interrupt should idle not be re-entered.
 *
 * One special case: the timer gets migrated without awakening the CPU
 * on which the timer was scheduled.  In this case, we must wake up
 * that CPU.  We do so with smp_call_function_single().
 */
static void rcu_idle_gp_timer_func(unsigned long cpu_in)
{
	int cpu = (int)cpu_in;

	trace_rcu_prep_idle("Timer");
	if (cpu != smp_processor_id())
		smp_call_function_single(cpu, rcu_idle_demigrate, NULL, 0);
	else
		WARN_ON_ONCE(1); /* Getting here can hang the system... */
}

/*
 * Initialize the timer used to pull CPUs out of dyntick-idle mode.
 */
static void rcu_prepare_for_idle_init(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	rdtp->dyntick_holdoff = jiffies - 1;
	setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
	rdtp->idle_gp_timer_expires = jiffies - 1;
	rdtp->idle_first_pass = 1;
}

/*
 * Clean up for exit from idle.  Because we are exiting from idle, there
 * is no longer any point to ->idle_gp_timer, so cancel it.  This will
 * do nothing if this timer is not active, so just cancel it unconditionally.
 */
static void rcu_cleanup_after_idle(int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	del_timer(&rdtp->idle_gp_timer);
	trace_rcu_prep_idle("Cleanup after idle");
	rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}

/*
 * Check to see if any RCU-related work can be done by the current CPU,
 * and if so, schedule a softirq to get it done.  This function is part
 * of the RCU implementation; it is -not- an exported member of the RCU API.
 *
 * The idea is for the current CPU to clear out all work required by the
 * RCU core for the current grace period, so that this CPU can be permitted
 * to enter dyntick-idle mode.  In some cases, it will need to be awakened
 * at the end of the grace period by whatever CPU ends the grace period.
 * This allows CPUs to go dyntick-idle more quickly, and to reduce the
 * number of wakeups by a modest integer factor.
 *
 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do a
 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
 * later.  The ->dyntick_drain field controls the sequencing.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(int cpu)
{
	struct timer_list *tp;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	int tne;

	/* Handle nohz enablement switches conservatively. */
	tne = ACCESS_ONCE(tick_nohz_enabled);
	if (tne != rdtp->tick_nohz_enabled_snap) {
		if (rcu_cpu_has_callbacks(cpu))
			invoke_rcu_core(); /* force nohz to see update. */
		rdtp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/* Adaptive-tick mode, where usermode execution is idle to RCU. */
	if (!is_idle_task(current)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("User dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else if (rcu_cpu_has_callbacks(cpu)) {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("User dyntick with lazy callbacks");
		} else {
			return;
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		return;
	}

	/*
	 * If this is an idle re-entry, for example, due to use of
	 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
	 * loop, then don't take any state-machine actions, unless the
	 * momentary exit from idle queued additional non-lazy callbacks.
	 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
	 * pending.
	 */
	if (!rdtp->idle_first_pass &&
	    (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
		if (rcu_cpu_has_callbacks(cpu)) {
			tp = &rdtp->idle_gp_timer;
			mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		}
		return;
	}
	rdtp->idle_first_pass = 0;
	rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;

	/*
	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
	 * Also reset state to avoid prejudicing later attempts.
	 */
	if (!rcu_cpu_has_callbacks(cpu)) {
		rdtp->dyntick_holdoff = jiffies - 1;
		rdtp->dyntick_drain = 0;
		trace_rcu_prep_idle("No callbacks");
		return;
	}

	/*
	 * If in holdoff mode, just return.  We will presumably have
	 * refrained from disabling the scheduling-clock tick.
	 */
	if (rdtp->dyntick_holdoff == jiffies) {
		trace_rcu_prep_idle("In holdoff");
		return;
	}

	/* Check and update the ->dyntick_drain sequencing. */
	if (rdtp->dyntick_drain <= 0) {
		/* First time through, initialize the counter. */
		rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
	} else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
		   !rcu_pending(cpu) &&
		   !local_softirq_pending()) {
		/* Can we go dyntick-idle despite still having callbacks? */
		rdtp->dyntick_drain = 0;
		rdtp->dyntick_holdoff = jiffies;
		if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
			trace_rcu_prep_idle("Dyntick with callbacks");
			rdtp->idle_gp_timer_expires =
				round_up(jiffies + RCU_IDLE_GP_DELAY,
					 RCU_IDLE_GP_DELAY);
		} else {
			rdtp->idle_gp_timer_expires =
				round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
			trace_rcu_prep_idle("Dyntick with lazy callbacks");
		}
		tp = &rdtp->idle_gp_timer;
		mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
		rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
		return; /* Nothing more to do immediately. */
	} else if (--(rdtp->dyntick_drain) <= 0) {
		/* We have hit the limit, so time to give up. */
		rdtp->dyntick_holdoff = jiffies;
		trace_rcu_prep_idle("Begin holdoff");
		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
		return;
	}

	/*
	 * Do one step of pushing the remaining RCU callbacks through
	 * the RCU core state machine.
	 */
#ifdef CONFIG_TREE_PREEMPT_RCU
	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
		rcu_preempt_qs(cpu);
		force_quiescent_state(&rcu_preempt_state);
	}
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
		rcu_sched_qs(cpu);
		force_quiescent_state(&rcu_sched_state);
	}
	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
		rcu_bh_qs(cpu);
		force_quiescent_state(&rcu_bh_state);
	}

	/*
	 * If RCU callbacks are still pending, RCU still needs this CPU.
	 * So try forcing the callbacks through the grace period.
	 */
	if (rcu_cpu_has_callbacks(cpu)) {
		trace_rcu_prep_idle("More callbacks");
		invoke_rcu_core();
	} else {
		trace_rcu_prep_idle("Callbacks drained");
	}
}

/*
 * Keep a running count of the number of non-lazy callbacks posted
 * on this CPU.  This running counter (which is never decremented) allows
 * rcu_prepare_for_idle() to detect when something out of the idle loop
 * posts a callback, even if an equal number of callbacks are invoked.
 * Of course, callbacks should only be posted from within a trace event
 * designed to be called from idle or from within RCU_NONIDLE().
 */
static void rcu_idle_count_callbacks_posted(void)
{
	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
}

/*
 * Data for flushing lazy RCU callbacks at OOM time.
 */
static atomic_t oom_callback_count;
static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);

/*
 * RCU OOM callback -- decrement the outstanding count and deliver the
 * wake-up if we are the last one.
 */
static void rcu_oom_callback(struct rcu_head *rhp)
{
	if (atomic_dec_and_test(&oom_callback_count))
		wake_up(&oom_callback_wq);
}

/*
 * Post an rcu_oom_notify callback on the current CPU if it has at
 * least one lazy callback.  This will unnecessarily post callbacks
 * to CPUs that already have a non-lazy callback at the end of their
 * callback list, but this is an infrequent operation, so accept some
 * extra overhead to keep things simple.
 */
static void rcu_oom_notify_cpu(void *unused)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;

	for_each_rcu_flavor(rsp) {
		rdp = __this_cpu_ptr(rsp->rda);
		if (rdp->qlen_lazy != 0) {
			atomic_inc(&oom_callback_count);
			rsp->call(&rdp->oom_head, rcu_oom_callback);
		}
	}
}

/*
 * If low on memory, ensure that each CPU has a non-lazy callback.
 * This will wake up CPUs that have only lazy callbacks, in turn
 * ensuring that they free up the corresponding memory in a timely manner.
 * Because an uncertain amount of memory will be freed in some uncertain
 * timeframe, we do not claim to have freed anything.
 */
static int rcu_oom_notify(struct notifier_block *self,
			  unsigned long notused, void *nfreed)
{
	int cpu;

	/* Wait for callbacks from earlier instance to complete. */
	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);

	/*
	 * Prevent premature wakeup: ensure that all increments happen
	 * before there is a chance of the counter reaching zero.
	 */
	atomic_set(&oom_callback_count, 1);

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
	}

	/* Unconditionally decrement: no need to wake ourselves up. */
	atomic_dec(&oom_callback_count);

	return 0;
}

static struct notifier_block rcu_oom_nb = {
	.notifier_call = rcu_oom_notify
};

static int __init rcu_register_oom_notifier(void)
{
	register_oom_notifier(&rcu_oom_nb);
	return 0;
}
early_initcall(rcu_register_oom_notifier);

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

#ifdef CONFIG_RCU_CPU_STALL_INFO

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
	struct timer_list *tltp = &rdtp->idle_gp_timer;
	char c;

	c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.';
	if (timer_pending(tltp))
		sprintf(cp, "drain=%d %c timer=%lu",
			rdtp->dyntick_drain, c, tltp->expires - jiffies);
	else
		sprintf(cp, "drain=%d %c timer not pending",
			rdtp->dyntick_drain, c);
}

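/*
 * Illustrative output fragments (values hypothetical): "drain=3 H timer=250"
 * for a CPU in holdoff whose idle timer fires 250 jiffies from now, or
 * "drain=0 . timer not pending" otherwise.
 */
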
#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

/* Initiate the stall-info list. */
static void print_cpu_stall_info_begin(void)
{
	printk(KERN_CONT "\n");
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period
 * (flavor specified by rsp), then print the number of scheduling
 * clock interrupts the CPU has taken during the time that it has
 * been aware.  Otherwise, print the number of RCU grace periods
 * that this CPU is ignorant of, for example, "1" if the CPU was
 * aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
{
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_dynticks *rdtp = rdp->dynticks;
	char *ticks_title;
	unsigned long ticks_value;

	if (rsp->gpnum == rdp->gpnum) {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	} else {
		ticks_title = "GPs behind";
		ticks_value = rsp->gpnum - rdp->gpnum;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
	       cpu, ticks_value, ticks_title,
	       atomic_read(&rdtp->dynticks) & 0xfff,
	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
	       fast_no_hz);
}

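/*
 * An illustrative (hypothetical) line produced by the format above:
 *
 *	3: (2500 ticks this GP) idle=d41/1/0 drain=0 . timer not pending
 */
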
/* Terminate the stall-info list. */
static void print_cpu_stall_info_end(void)
{
	printk(KERN_ERR "\t");
}

/* Zero ->ticks_this_gp for all flavors of RCU. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
}

/* Increment ->ticks_this_gp for all flavors of RCU. */
static void increment_cpu_stall_ticks(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		__this_cpu_ptr(rsp->rda)->ticks_this_gp++;
}

2071 static void print_cpu_stall_info_begin(void)
2073 printk(KERN_CONT
" {");
2076 static void print_cpu_stall_info(struct rcu_state
*rsp
, int cpu
)
2078 printk(KERN_CONT
" %d", cpu
);
2081 static void print_cpu_stall_info_end(void)
2083 printk(KERN_CONT
"} ");
2086 static void zero_cpu_stall_ticks(struct rcu_data
*rdp
)
2090 static void increment_cpu_stall_ticks(void)
2094 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */