rcu: Fix early call to rcu_idle_enter()
[linux-2.6.git] / kernel / rcutree_plugin.h
blob 7a7961feeecf2e9a1f3e2517667d6229379b3acc
1 /*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 * Internal non-public definitions that provide either classic
4 * or preemptible semantics.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Copyright Red Hat, 2009
21 * Copyright IBM Corporation, 2009
23 * Author: Ingo Molnar <mingo@elte.hu>
24 * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
27 #include <linux/delay.h>
28 #include <linux/stop_machine.h>
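/*
 * Priority at which RCU's kthreads run.  When RCU priority boosting is
 * configured, the boost kthreads instead use the user-selected
 * CONFIG_RCU_BOOST_PRIO; otherwise they fall back to RCU_KTHREAD_PRIO.
 */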
30 #define RCU_KTHREAD_PRIO 1
32 #ifdef CONFIG_RCU_BOOST
33 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
34 #else
35 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
36 #endif
39 * Check the RCU kernel configuration parameters and print informative
40 * messages about anything out of the ordinary. If you like #ifdef, you
41 * will love this function.
43 static void __init rcu_bootup_announce_oddness(void)
45 #ifdef CONFIG_RCU_TRACE
46 printk(KERN_INFO "\tRCU debugfs-based tracing is enabled.\n");
47 #endif
48 #if (defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) || (!defined(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32)
49 printk(KERN_INFO "\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
50 CONFIG_RCU_FANOUT);
51 #endif
52 #ifdef CONFIG_RCU_FANOUT_EXACT
53 printk(KERN_INFO "\tHierarchical RCU autobalancing is disabled.\n");
54 #endif
55 #ifdef CONFIG_RCU_FAST_NO_HZ
56 printk(KERN_INFO
57 "\tRCU dyntick-idle grace-period acceleration is enabled.\n");
58 #endif
59 #ifdef CONFIG_PROVE_RCU
60 printk(KERN_INFO "\tRCU lockdep checking is enabled.\n");
61 #endif
62 #ifdef CONFIG_RCU_TORTURE_TEST_RUNNABLE
63 printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
64 #endif
65 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
66 printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
67 #endif
68 #if NUM_RCU_LVL_4 != 0
69 printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
70 #endif
73 #ifdef CONFIG_TREE_PREEMPT_RCU
75 struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
76 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
77 static struct rcu_state *rcu_state = &rcu_preempt_state;
79 static void rcu_read_unlock_special(struct task_struct *t);
80 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
83 * Tell them what RCU they are running.
85 static void __init rcu_bootup_announce(void)
87 printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
88 rcu_bootup_announce_oddness();
92 * Return the number of RCU-preempt batches processed thus far
93 * for debug and statistics.
95 long rcu_batches_completed_preempt(void)
97 return rcu_preempt_state.completed;
99 EXPORT_SYMBOL_GPL(rcu_batches_completed_preempt);
102 * Return the number of RCU batches processed thus far for debug & stats.
104 long rcu_batches_completed(void)
106 return rcu_batches_completed_preempt();
108 EXPORT_SYMBOL_GPL(rcu_batches_completed);
111 * Force a quiescent state for preemptible RCU.
113 void rcu_force_quiescent_state(void)
115 force_quiescent_state(&rcu_preempt_state, 0);
117 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
120 * Record a preemptible-RCU quiescent state for the specified CPU. Note
121 * that this just means that the task currently running on the CPU is
122 * not in a quiescent state. There might be any number of tasks blocked
123 * while in an RCU read-side critical section.
125 * Unlike the other rcu_*_qs() functions, callers to this function
126 * must disable irqs in order to protect the assignment to
127 * ->rcu_read_unlock_special.
129 static void rcu_preempt_qs(int cpu)
131 struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
133 rdp->passed_quiesce_gpnum = rdp->gpnum;
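/*
 * The barrier() below is a compiler barrier only: it keeps the
 * ->passed_quiesce_gpnum store above from being reordered after the
 * ->passed_quiesce update below.
 */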
134 barrier();
135 if (rdp->passed_quiesce == 0)
136 trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
137 rdp->passed_quiesce = 1;
138 current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
142 * We have entered the scheduler, and the current task might soon be
143 * context-switched away from. If this task is in an RCU read-side
144 * critical section, we will no longer be able to rely on the CPU to
145 * record that fact, so we enqueue the task on the blkd_tasks list.
146 * The task will dequeue itself when it exits the outermost enclosing
147 * RCU read-side critical section. Therefore, the current grace period
148 * cannot be permitted to complete until the blkd_tasks list entries
149 * predating the current grace period drain, in other words, until
150 * rnp->gp_tasks becomes NULL.
152 * Caller must disable preemption.
154 static void rcu_preempt_note_context_switch(int cpu)
156 struct task_struct *t = current;
157 unsigned long flags;
158 struct rcu_data *rdp;
159 struct rcu_node *rnp;
161 if (t->rcu_read_lock_nesting > 0 &&
162 (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
164 /* Possibly blocking in an RCU read-side critical section. */
165 rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
166 rnp = rdp->mynode;
167 raw_spin_lock_irqsave(&rnp->lock, flags);
168 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
169 t->rcu_blocked_node = rnp;
172 * If this CPU has already checked in, then this task
173 * will hold up the next grace period rather than the
174 * current grace period. Queue the task accordingly.
175 * If the task is queued for the current grace period
176 * (i.e., this CPU has not yet passed through a quiescent
177 * state for the current grace period), then as long
178 * as that task remains queued, the current grace period
179 * cannot end. Note that there is some uncertainty as
180 * to exactly when the current grace period started.
181 * We take a conservative approach, which can result
182 * in unnecessarily waiting on tasks that started very
183 * slightly after the current grace period began. C'est
184 * la vie!!!
186 * But first, note that the current CPU must still be
187 * on line!
189 WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
190 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
191 if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
192 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
193 rnp->gp_tasks = &t->rcu_node_entry;
194 #ifdef CONFIG_RCU_BOOST
195 if (rnp->boost_tasks != NULL)
196 rnp->boost_tasks = rnp->gp_tasks;
197 #endif /* #ifdef CONFIG_RCU_BOOST */
198 } else {
199 list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
200 if (rnp->qsmask & rdp->grpmask)
201 rnp->gp_tasks = &t->rcu_node_entry;
203 trace_rcu_preempt_task(rdp->rsp->name,
204 t->pid,
205 (rnp->qsmask & rdp->grpmask)
206 ? rnp->gpnum
207 : rnp->gpnum + 1);
208 raw_spin_unlock_irqrestore(&rnp->lock, flags);
209 } else if (t->rcu_read_lock_nesting < 0 &&
210 t->rcu_read_unlock_special) {
213 * Complete exit from RCU read-side critical section on
214 * behalf of preempted instance of __rcu_read_unlock().
216 rcu_read_unlock_special(t);
220 * Either we were not in an RCU read-side critical section to
221 * begin with, or we have now recorded that critical section
222 * globally. Either way, we can now note a quiescent state
223 * for this CPU. Again, if we were in an RCU read-side critical
224 * section, and if that critical section was blocking the current
225 * grace period, then the fact that the task has been enqueued
226 * means that we continue to block the current grace period.
228 local_irq_save(flags);
229 rcu_preempt_qs(cpu);
230 local_irq_restore(flags);
234 * Tree-preemptible RCU implementation for rcu_read_lock().
235 * Just increment ->rcu_read_lock_nesting, shared state will be updated
236 * if we block.
238 void __rcu_read_lock(void)
240 current->rcu_read_lock_nesting++;
241 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
243 EXPORT_SYMBOL_GPL(__rcu_read_lock);
246 * Check for preempted RCU readers blocking the current grace period
247 * for the specified rcu_node structure. If the caller needs a reliable
248 * answer, it must hold the rcu_node's ->lock.
250 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
252 return rnp->gp_tasks != NULL;
256 * Record a quiescent state for all tasks that were previously queued
257 * on the specified rcu_node structure and that were blocking the current
258 * RCU grace period. The caller must hold the specified rnp->lock with
259 * irqs disabled, and this lock is released upon return, but irqs remain
260 * disabled.
262 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
263 __releases(rnp->lock)
265 unsigned long mask;
266 struct rcu_node *rnp_p;
268 if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
269 raw_spin_unlock_irqrestore(&rnp->lock, flags);
270 return; /* Still need more quiescent states! */
273 rnp_p = rnp->parent;
274 if (rnp_p == NULL) {
276 * Either there is only one rcu_node in the tree,
277 * or tasks were kicked up to root rcu_node due to
278 * CPUs going offline.
280 rcu_report_qs_rsp(&rcu_preempt_state, flags);
281 return;
284 /* Report up the rest of the hierarchy. */
285 mask = rnp->grpmask;
286 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
287 raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
288 rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 292  * Advance a ->blkd_tasks-list pointer to the next entry, returning
 293  * NULL instead if we are at the end of the list.
295 static struct list_head *rcu_next_node_entry(struct task_struct *t,
296 struct rcu_node *rnp)
298 struct list_head *np;
300 np = t->rcu_node_entry.next;
301 if (np == &rnp->blkd_tasks)
302 np = NULL;
303 return np;
307 * Handle special cases during rcu_read_unlock(), such as needing to
 308  * notify RCU core processing or the task having blocked during the RCU
309 * read-side critical section.
311 static noinline void rcu_read_unlock_special(struct task_struct *t)
313 int empty;
314 int empty_exp;
315 int empty_exp_now;
316 unsigned long flags;
317 struct list_head *np;
318 #ifdef CONFIG_RCU_BOOST
319 struct rt_mutex *rbmp = NULL;
320 #endif /* #ifdef CONFIG_RCU_BOOST */
321 struct rcu_node *rnp;
322 int special;
324 /* NMI handlers cannot block and cannot safely manipulate state. */
325 if (in_nmi())
326 return;
328 local_irq_save(flags);
331 * If RCU core is waiting for this CPU to exit critical section,
332 * let it know that we have done so.
334 special = t->rcu_read_unlock_special;
335 if (special & RCU_READ_UNLOCK_NEED_QS) {
336 rcu_preempt_qs(smp_processor_id());
339 /* Hardware IRQ handlers cannot block. */
340 if (in_irq() || in_serving_softirq()) {
341 local_irq_restore(flags);
342 return;
345 /* Clean up if blocked during RCU read-side critical section. */
346 if (special & RCU_READ_UNLOCK_BLOCKED) {
347 t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
350 * Remove this task from the list it blocked on. The
351 * task can migrate while we acquire the lock, but at
 352  * most one time.  So at most two passes through the loop.
354 for (;;) {
355 rnp = t->rcu_blocked_node;
356 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
357 if (rnp == t->rcu_blocked_node)
358 break;
359 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
361 empty = !rcu_preempt_blocked_readers_cgp(rnp);
362 empty_exp = !rcu_preempted_readers_exp(rnp);
363 smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
364 np = rcu_next_node_entry(t, rnp);
365 list_del_init(&t->rcu_node_entry);
366 t->rcu_blocked_node = NULL;
367 trace_rcu_unlock_preempted_task("rcu_preempt",
368 rnp->gpnum, t->pid);
369 if (&t->rcu_node_entry == rnp->gp_tasks)
370 rnp->gp_tasks = np;
371 if (&t->rcu_node_entry == rnp->exp_tasks)
372 rnp->exp_tasks = np;
373 #ifdef CONFIG_RCU_BOOST
374 if (&t->rcu_node_entry == rnp->boost_tasks)
375 rnp->boost_tasks = np;
376 /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
377 if (t->rcu_boost_mutex) {
378 rbmp = t->rcu_boost_mutex;
379 t->rcu_boost_mutex = NULL;
381 #endif /* #ifdef CONFIG_RCU_BOOST */
384 * If this was the last task on the current list, and if
385 * we aren't waiting on any CPUs, report the quiescent state.
386 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
387 * so we must take a snapshot of the expedited state.
389 empty_exp_now = !rcu_preempted_readers_exp(rnp);
390 if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
391 trace_rcu_quiescent_state_report("preempt_rcu",
392 rnp->gpnum,
393 0, rnp->qsmask,
394 rnp->level,
395 rnp->grplo,
396 rnp->grphi,
397 !!rnp->gp_tasks);
398 rcu_report_unblock_qs_rnp(rnp, flags);
399 } else
400 raw_spin_unlock_irqrestore(&rnp->lock, flags);
402 #ifdef CONFIG_RCU_BOOST
403 /* Unboost if we were boosted. */
404 if (rbmp)
405 rt_mutex_unlock(rbmp);
406 #endif /* #ifdef CONFIG_RCU_BOOST */
409 * If this was the last task on the expedited lists,
410 * then we need to report up the rcu_node hierarchy.
412 if (!empty_exp && empty_exp_now)
413 rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
414 } else {
415 local_irq_restore(flags);
420 * Tree-preemptible RCU implementation for rcu_read_unlock().
421 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
422 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
423 * invoke rcu_read_unlock_special() to clean up after a context switch
424 * in an RCU read-side critical section and other special cases.
426 void __rcu_read_unlock(void)
428 struct task_struct *t = current;
430 if (t->rcu_read_lock_nesting != 1)
431 --t->rcu_read_lock_nesting;
432 else {
433 barrier(); /* critical section before exit code. */
434 t->rcu_read_lock_nesting = INT_MIN;
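/*
 * INT_MIN acts as a sentinel: while ->rcu_read_lock_nesting is
 * negative, rcu_preempt_note_context_switch() and the lockdep check
 * at the end of this function know that this task is in the cleanup
 * phase of its outermost rcu_read_unlock().
 */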
435 barrier(); /* assign before ->rcu_read_unlock_special load */
436 if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
437 rcu_read_unlock_special(t);
438 barrier(); /* ->rcu_read_unlock_special load before assign */
439 t->rcu_read_lock_nesting = 0;
441 #ifdef CONFIG_PROVE_LOCKING
443 int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
445 WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
447 #endif /* #ifdef CONFIG_PROVE_LOCKING */
449 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
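/*
 * Illustrative reader-side usage (a sketch, not part of this file): the
 * __rcu_read_lock()/__rcu_read_unlock() pair above backs the usual
 * rcu_read_lock()/rcu_read_unlock() API.  Here gp is a hypothetical
 * RCU-protected pointer and do_something_with() a hypothetical helper;
 * p may be dereferenced only within the read-side critical section:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */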
451 #ifdef CONFIG_RCU_CPU_STALL_VERBOSE
454 * Dump detailed information for all tasks blocking the current RCU
455 * grace period on the specified rcu_node structure.
457 static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
459 unsigned long flags;
460 struct task_struct *t;
462 if (!rcu_preempt_blocked_readers_cgp(rnp))
463 return;
464 raw_spin_lock_irqsave(&rnp->lock, flags);
465 t = list_entry(rnp->gp_tasks,
466 struct task_struct, rcu_node_entry);
467 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
468 sched_show_task(t);
469 raw_spin_unlock_irqrestore(&rnp->lock, flags);
473 * Dump detailed information for all tasks blocking the current RCU
474 * grace period.
476 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
478 struct rcu_node *rnp = rcu_get_root(rsp);
480 rcu_print_detail_task_stall_rnp(rnp);
481 rcu_for_each_leaf_node(rsp, rnp)
482 rcu_print_detail_task_stall_rnp(rnp);
485 #else /* #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
487 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
491 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
494 * Scan the current list of tasks blocked within RCU read-side critical
495 * sections, printing out the tid of each.
497 static int rcu_print_task_stall(struct rcu_node *rnp)
499 struct task_struct *t;
500 int ndetected = 0;
502 if (!rcu_preempt_blocked_readers_cgp(rnp))
503 return 0;
504 t = list_entry(rnp->gp_tasks,
505 struct task_struct, rcu_node_entry);
506 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
507 printk(" P%d", t->pid);
508 ndetected++;
510 return ndetected;
514 * Suppress preemptible RCU's CPU stall warnings by pushing the
515 * time of the next stall-warning message comfortably far into the
516 * future.
518 static void rcu_preempt_stall_reset(void)
520 rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
524 * Check that the list of blocked tasks for the newly completed grace
525 * period is in fact empty. It is a serious bug to complete a grace
526 * period that still has RCU readers blocked! This function must be
527 * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
528 * must be held by the caller.
530 * Also, if there are blocked tasks on the list, they automatically
531 * block the newly created grace period, so set up ->gp_tasks accordingly.
533 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
535 WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
536 if (!list_empty(&rnp->blkd_tasks))
537 rnp->gp_tasks = rnp->blkd_tasks.next;
538 WARN_ON_ONCE(rnp->qsmask);
541 #ifdef CONFIG_HOTPLUG_CPU
544 * Handle tasklist migration for case in which all CPUs covered by the
545 * specified rcu_node have gone offline. Move them up to the root
546 * rcu_node. The reason for not just moving them to the immediate
547 * parent is to remove the need for rcu_read_unlock_special() to
548 * make more than two attempts to acquire the target rcu_node's lock.
 549  * Returns non-zero if there were tasks blocking the current RCU grace
 550  * period on the specified rcu_node structure.
555 * The caller must hold rnp->lock with irqs disabled.
557 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
558 struct rcu_node *rnp,
559 struct rcu_data *rdp)
561 struct list_head *lp;
562 struct list_head *lp_root;
563 int retval = 0;
564 struct rcu_node *rnp_root = rcu_get_root(rsp);
565 struct task_struct *t;
567 if (rnp == rnp_root) {
568 WARN_ONCE(1, "Last CPU thought to be offlined?");
569 return 0; /* Shouldn't happen: at least one CPU online. */
572 /* If we are on an internal node, complain bitterly. */
573 WARN_ON_ONCE(rnp != rdp->mynode);
576 * Move tasks up to root rcu_node. Don't try to get fancy for
577 * this corner-case operation -- just put this node's tasks
578 * at the head of the root node's list, and update the root node's
579 * ->gp_tasks and ->exp_tasks pointers to those of this node's,
580 * if non-NULL. This might result in waiting for more tasks than
581 * absolutely necessary, but this is a good performance/complexity
582 * tradeoff.
584 if (rcu_preempt_blocked_readers_cgp(rnp))
585 retval |= RCU_OFL_TASKS_NORM_GP;
586 if (rcu_preempted_readers_exp(rnp))
587 retval |= RCU_OFL_TASKS_EXP_GP;
588 lp = &rnp->blkd_tasks;
589 lp_root = &rnp_root->blkd_tasks;
590 while (!list_empty(lp)) {
591 t = list_entry(lp->next, typeof(*t), rcu_node_entry);
592 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
593 list_del(&t->rcu_node_entry);
594 t->rcu_blocked_node = rnp_root;
595 list_add(&t->rcu_node_entry, lp_root);
596 if (&t->rcu_node_entry == rnp->gp_tasks)
597 rnp_root->gp_tasks = rnp->gp_tasks;
598 if (&t->rcu_node_entry == rnp->exp_tasks)
599 rnp_root->exp_tasks = rnp->exp_tasks;
600 #ifdef CONFIG_RCU_BOOST
601 if (&t->rcu_node_entry == rnp->boost_tasks)
602 rnp_root->boost_tasks = rnp->boost_tasks;
603 #endif /* #ifdef CONFIG_RCU_BOOST */
604 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
607 #ifdef CONFIG_RCU_BOOST
608 /* In case root is being boosted and leaf is not. */
609 raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
610 if (rnp_root->boost_tasks != NULL &&
611 rnp_root->boost_tasks != rnp_root->gp_tasks)
612 rnp_root->boost_tasks = rnp_root->gp_tasks;
613 raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */
614 #endif /* #ifdef CONFIG_RCU_BOOST */
616 rnp->gp_tasks = NULL;
617 rnp->exp_tasks = NULL;
618 return retval;
622 * Do CPU-offline processing for preemptible RCU.
624 static void rcu_preempt_offline_cpu(int cpu)
626 __rcu_offline_cpu(cpu, &rcu_preempt_state);
629 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
632 * Check for a quiescent state from the current CPU. When a task blocks,
633 * the task is recorded in the corresponding CPU's rcu_node structure,
634 * which is checked elsewhere.
636 * Caller must disable hard irqs.
638 static void rcu_preempt_check_callbacks(int cpu)
640 struct task_struct *t = current;
642 if (t->rcu_read_lock_nesting == 0) {
643 rcu_preempt_qs(cpu);
644 return;
646 if (t->rcu_read_lock_nesting > 0 &&
647 per_cpu(rcu_preempt_data, cpu).qs_pending)
648 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
652 * Process callbacks for preemptible RCU.
654 static void rcu_preempt_process_callbacks(void)
656 __rcu_process_callbacks(&rcu_preempt_state,
657 &__get_cpu_var(rcu_preempt_data));
660 #ifdef CONFIG_RCU_BOOST
662 static void rcu_preempt_do_callbacks(void)
664 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
667 #endif /* #ifdef CONFIG_RCU_BOOST */
670 * Queue a preemptible-RCU callback for invocation after a grace period.
672 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
674 __call_rcu(head, func, &rcu_preempt_state);
676 EXPORT_SYMBOL_GPL(call_rcu);
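/*
 * Illustrative use of call_rcu() (a sketch, not part of this file):
 * embed an rcu_head in the protected structure and have the callback
 * free the structure once a grace period has elapsed.  struct foo and
 * foo_reclaim() are hypothetical names used only for this sketch:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 * Then, after removing p from all RCU-readable paths:
 *
 *	call_rcu(&p->rcu, foo_reclaim);
 */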
679 * synchronize_rcu - wait until a grace period has elapsed.
681 * Control will return to the caller some time after a full grace
682 * period has elapsed, in other words after all currently executing RCU
683 * read-side critical sections have completed. Note, however, that
684 * upon return from synchronize_rcu(), the caller might well be executing
685 * concurrently with new RCU read-side critical sections that began while
686 * synchronize_rcu() was waiting. RCU read-side critical sections are
687 * delimited by rcu_read_lock() and rcu_read_unlock(), and may be nested.
689 void synchronize_rcu(void)
691 if (!rcu_scheduler_active)
692 return;
693 wait_rcu_gp(call_rcu);
695 EXPORT_SYMBOL_GPL(synchronize_rcu);
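/*
 * Illustrative use of synchronize_rcu() (a sketch, not part of this
 * file): the classic update-then-wait pattern, where gp is a
 * hypothetical RCU-protected pointer and update_lock a hypothetical
 * lock serializing updaters.  The old structure may be freed only
 * after synchronize_rcu() returns, which guarantees that all
 * pre-existing readers have completed:
 *
 *	spin_lock(&update_lock);
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&update_lock);
 *	synchronize_rcu();
 *	kfree(old);
 */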
697 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
698 static long sync_rcu_preempt_exp_count;
699 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
702 * Return non-zero if there are any tasks in RCU read-side critical
703 * sections blocking the current preemptible-RCU expedited grace period.
704 * If there is no preemptible-RCU expedited grace period currently in
705 * progress, returns zero unconditionally.
707 static int rcu_preempted_readers_exp(struct rcu_node *rnp)
709 return rnp->exp_tasks != NULL;
 713  * Return non-zero if there is no RCU expedited grace period in progress
714 * for the specified rcu_node structure, in other words, if all CPUs and
715 * tasks covered by the specified rcu_node structure have done their bit
716 * for the current expedited grace period. Works only for preemptible
 717  * RCU -- other RCU implementations use other means.
719 * Caller must hold sync_rcu_preempt_exp_mutex.
721 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
723 return !rcu_preempted_readers_exp(rnp) &&
724 ACCESS_ONCE(rnp->expmask) == 0;
728 * Report the exit from RCU read-side critical section for the last task
729 * that queued itself during or before the current expedited preemptible-RCU
730 * grace period. This event is reported either to the rcu_node structure on
731 * which the task was queued or to one of that rcu_node structure's ancestors,
732 * recursively up the tree. (Calm down, calm down, we do the recursion
733 * iteratively!)
735 * Most callers will set the "wake" flag, but the task initiating the
736 * expedited grace period need not wake itself.
738 * Caller must hold sync_rcu_preempt_exp_mutex.
740 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
741 bool wake)
743 unsigned long flags;
744 unsigned long mask;
746 raw_spin_lock_irqsave(&rnp->lock, flags);
747 for (;;) {
748 if (!sync_rcu_preempt_exp_done(rnp)) {
749 raw_spin_unlock_irqrestore(&rnp->lock, flags);
750 break;
752 if (rnp->parent == NULL) {
753 raw_spin_unlock_irqrestore(&rnp->lock, flags);
754 if (wake)
755 wake_up(&sync_rcu_preempt_exp_wq);
756 break;
758 mask = rnp->grpmask;
759 raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
760 rnp = rnp->parent;
761 raw_spin_lock(&rnp->lock); /* irqs already disabled */
762 rnp->expmask &= ~mask;
767 * Snapshot the tasks blocking the newly started preemptible-RCU expedited
768 * grace period for the specified rcu_node structure. If there are no such
769 * tasks, report it up the rcu_node hierarchy.
771 * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock.
773 static void
774 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
776 unsigned long flags;
777 int must_wait = 0;
779 raw_spin_lock_irqsave(&rnp->lock, flags);
780 if (list_empty(&rnp->blkd_tasks))
781 raw_spin_unlock_irqrestore(&rnp->lock, flags);
782 else {
783 rnp->exp_tasks = rnp->blkd_tasks.next;
784 rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
785 must_wait = 1;
787 if (!must_wait)
788 rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
792 * Wait for an rcu-preempt grace period, but expedite it. The basic idea
793 * is to invoke synchronize_sched_expedited() to push all the tasks to
 794  * the ->blkd_tasks lists and wait for these lists to drain.
796 void synchronize_rcu_expedited(void)
798 unsigned long flags;
799 struct rcu_node *rnp;
800 struct rcu_state *rsp = &rcu_preempt_state;
801 long snap;
802 int trycount = 0;
804 smp_mb(); /* Caller's modifications seen first by other CPUs. */
805 snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
806 smp_mb(); /* Above access cannot bleed into critical section. */
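/*
 * Taking the snapshot as the old counter value plus one means that the
 * "did someone else do our work?" checks below succeed only when at
 * least one full expedited grace period has elapsed entirely after
 * this point, which is what allows us to return without doing our own.
 */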
809 * Acquire lock, falling back to synchronize_rcu() if too many
810 * lock-acquisition failures. Of course, if someone does the
811 * expedited grace period for us, just leave.
813 while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
814 if (trycount++ < 10)
815 udelay(trycount * num_online_cpus());
816 else {
817 synchronize_rcu();
818 return;
820 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
821 goto mb_ret; /* Others did our work for us. */
823 if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
824 goto unlock_mb_ret; /* Others did our work for us. */
826 /* force all RCU readers onto ->blkd_tasks lists. */
827 synchronize_sched_expedited();
829 raw_spin_lock_irqsave(&rsp->onofflock, flags);
831 /* Initialize ->expmask for all non-leaf rcu_node structures. */
832 rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
833 raw_spin_lock(&rnp->lock); /* irqs already disabled. */
834 rnp->expmask = rnp->qsmaskinit;
835 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
838 /* Snapshot current state of ->blkd_tasks lists. */
839 rcu_for_each_leaf_node(rsp, rnp)
840 sync_rcu_preempt_exp_init(rsp, rnp);
841 if (NUM_RCU_NODES > 1)
842 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
844 raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
846 /* Wait for snapshotted ->blkd_tasks lists to drain. */
847 rnp = rcu_get_root(rsp);
848 wait_event(sync_rcu_preempt_exp_wq,
849 sync_rcu_preempt_exp_done(rnp));
851 /* Clean up and exit. */
852 smp_mb(); /* ensure expedited GP seen before counter increment. */
853 ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
854 unlock_mb_ret:
855 mutex_unlock(&sync_rcu_preempt_exp_mutex);
856 mb_ret:
857 smp_mb(); /* ensure subsequent action seen after grace period. */
859 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
862 * Check to see if there is any immediate preemptible-RCU-related work
863 * to be done.
865 static int rcu_preempt_pending(int cpu)
867 return __rcu_pending(&rcu_preempt_state,
868 &per_cpu(rcu_preempt_data, cpu));
872 * Does preemptible RCU need the CPU to stay out of dynticks mode?
874 static int rcu_preempt_needs_cpu(int cpu)
876 return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
880 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
882 void rcu_barrier(void)
884 _rcu_barrier(&rcu_preempt_state, call_rcu);
886 EXPORT_SYMBOL_GPL(rcu_barrier);
889 * Initialize preemptible RCU's per-CPU data.
891 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
893 rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
897 * Move preemptible RCU's callbacks from dying CPU to other online CPU.
899 static void rcu_preempt_send_cbs_to_online(void)
901 rcu_send_cbs_to_online(&rcu_preempt_state);
905 * Initialize preemptible RCU's state structures.
907 static void __init __rcu_init_preempt(void)
909 rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
913 * Check for a task exiting while in a preemptible-RCU read-side
 914  * critical section; clean up if so.  No need to issue warnings,
915 * as debug_check_no_locks_held() already does this if lockdep
916 * is enabled.
918 void exit_rcu(void)
920 struct task_struct *t = current;
922 if (t->rcu_read_lock_nesting == 0)
923 return;
924 t->rcu_read_lock_nesting = 1;
925 __rcu_read_unlock();
928 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
930 static struct rcu_state *rcu_state = &rcu_sched_state;
933 * Tell them what RCU they are running.
935 static void __init rcu_bootup_announce(void)
937 printk(KERN_INFO "Hierarchical RCU implementation.\n");
938 rcu_bootup_announce_oddness();
942 * Return the number of RCU batches processed thus far for debug & stats.
944 long rcu_batches_completed(void)
946 return rcu_batches_completed_sched();
948 EXPORT_SYMBOL_GPL(rcu_batches_completed);
951 * Force a quiescent state for RCU, which, because there is no preemptible
952 * RCU, becomes the same as rcu-sched.
954 void rcu_force_quiescent_state(void)
956 rcu_sched_force_quiescent_state();
958 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
961 * Because preemptible RCU does not exist, we never have to check for
962 * CPUs being in quiescent states.
964 static void rcu_preempt_note_context_switch(int cpu)
969 * Because preemptible RCU does not exist, there are never any preempted
970 * RCU readers.
972 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
974 return 0;
977 #ifdef CONFIG_HOTPLUG_CPU
979 /* Because preemptible RCU does not exist, no quieting of tasks. */
980 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
982 raw_spin_unlock_irqrestore(&rnp->lock, flags);
985 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
988 * Because preemptible RCU does not exist, we never have to check for
989 * tasks blocked within RCU read-side critical sections.
991 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
996 * Because preemptible RCU does not exist, we never have to check for
997 * tasks blocked within RCU read-side critical sections.
999 static int rcu_print_task_stall(struct rcu_node *rnp)
1001 return 0;
1005 * Because preemptible RCU does not exist, there is no need to suppress
1006 * its CPU stall warnings.
1008 static void rcu_preempt_stall_reset(void)
1013 * Because there is no preemptible RCU, there can be no readers blocked,
1014 * so there is no need to check for blocked tasks. So check only for
1015 * bogus qsmask values.
1017 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
1019 WARN_ON_ONCE(rnp->qsmask);
1022 #ifdef CONFIG_HOTPLUG_CPU
1025 * Because preemptible RCU does not exist, it never needs to migrate
1026 * tasks that were blocked within RCU read-side critical sections, and
1027 * such non-existent tasks cannot possibly have been blocking the current
1028 * grace period.
1030 static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
1031 struct rcu_node *rnp,
1032 struct rcu_data *rdp)
1034 return 0;
1038 * Because preemptible RCU does not exist, it never needs CPU-offline
1039 * processing.
1041 static void rcu_preempt_offline_cpu(int cpu)
1045 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1048 * Because preemptible RCU does not exist, it never has any callbacks
1049 * to check.
1051 static void rcu_preempt_check_callbacks(int cpu)
1056 * Because preemptible RCU does not exist, it never has any callbacks
1057 * to process.
1059 static void rcu_preempt_process_callbacks(void)
1064 * Wait for an rcu-preempt grace period, but make it happen quickly.
1065 * But because preemptible RCU does not exist, map to rcu-sched.
1067 void synchronize_rcu_expedited(void)
1069 synchronize_sched_expedited();
1071 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
1073 #ifdef CONFIG_HOTPLUG_CPU
1076 * Because preemptible RCU does not exist, there is never any need to
1077 * report on tasks preempted in RCU read-side critical sections during
1078 * expedited RCU grace periods.
1080 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
1081 bool wake)
1085 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1088 * Because preemptible RCU does not exist, it never has any work to do.
1090 static int rcu_preempt_pending(int cpu)
1092 return 0;
1096 * Because preemptible RCU does not exist, it never needs any CPU.
1098 static int rcu_preempt_needs_cpu(int cpu)
1100 return 0;
1104 * Because preemptible RCU does not exist, rcu_barrier() is just
1105 * another name for rcu_barrier_sched().
1107 void rcu_barrier(void)
1109 rcu_barrier_sched();
1111 EXPORT_SYMBOL_GPL(rcu_barrier);
1114 * Because preemptible RCU does not exist, there is no per-CPU
1115 * data to initialize.
1117 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
1122 * Because there is no preemptible RCU, there are no callbacks to move.
1124 static void rcu_preempt_send_cbs_to_online(void)
1129 * Because preemptible RCU does not exist, it need not be initialized.
1131 static void __init __rcu_init_preempt(void)
1135 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1137 #ifdef CONFIG_RCU_BOOST
1139 #include "rtmutex_common.h"
1141 #ifdef CONFIG_RCU_TRACE
1143 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1145 if (list_empty(&rnp->blkd_tasks))
1146 rnp->n_balk_blkd_tasks++;
1147 else if (rnp->exp_tasks == NULL && rnp->gp_tasks == NULL)
1148 rnp->n_balk_exp_gp_tasks++;
1149 else if (rnp->gp_tasks != NULL && rnp->boost_tasks != NULL)
1150 rnp->n_balk_boost_tasks++;
1151 else if (rnp->gp_tasks != NULL && rnp->qsmask != 0)
1152 rnp->n_balk_notblocked++;
1153 else if (rnp->gp_tasks != NULL &&
1154 ULONG_CMP_LT(jiffies, rnp->boost_time))
1155 rnp->n_balk_notyet++;
1156 else
1157 rnp->n_balk_nos++;
1160 #else /* #ifdef CONFIG_RCU_TRACE */
1162 static void rcu_initiate_boost_trace(struct rcu_node *rnp)
1166 #endif /* #else #ifdef CONFIG_RCU_TRACE */
1168 static struct lock_class_key rcu_boost_class;
1171 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
1172 * or ->boost_tasks, advancing the pointer to the next task in the
1173 * ->blkd_tasks list.
1175 * Note that irqs must be enabled: boosting the task can block.
1176 * Returns 1 if there are more tasks needing to be boosted.
1178 static int rcu_boost(struct rcu_node *rnp)
1180 unsigned long flags;
1181 struct rt_mutex mtx;
1182 struct task_struct *t;
1183 struct list_head *tb;
1185 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL)
1186 return 0; /* Nothing left to boost. */
1188 raw_spin_lock_irqsave(&rnp->lock, flags);
1191 * Recheck under the lock: all tasks in need of boosting
1192 * might exit their RCU read-side critical sections on their own.
1194 if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
1195 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1196 return 0;
1200 * Preferentially boost tasks blocking expedited grace periods.
1201 * This cannot starve the normal grace periods because a second
1202 * expedited grace period must boost all blocked tasks, including
1203 * those blocking the pre-existing normal grace period.
1205 if (rnp->exp_tasks != NULL) {
1206 tb = rnp->exp_tasks;
1207 rnp->n_exp_boosts++;
1208 } else {
1209 tb = rnp->boost_tasks;
1210 rnp->n_normal_boosts++;
1212 rnp->n_tasks_boosted++;
1215 * We boost task t by manufacturing an rt_mutex that appears to
1216 * be held by task t. We leave a pointer to that rt_mutex where
1217 * task t can find it, and task t will release the mutex when it
1218 * exits its outermost RCU read-side critical section. Then
1219 * simply acquiring this artificial rt_mutex will boost task
1220 * t's priority. (Thanks to tglx for suggesting this approach!)
1222 * Note that task t must acquire rnp->lock to remove itself from
1223 * the ->blkd_tasks list, which it will do from exit() if from
1224 * nowhere else. We therefore are guaranteed that task t will
1225 * stay around at least until we drop rnp->lock. Note that
1226 * rnp->lock also resolves races between our priority boosting
1227 * and task t's exiting its outermost RCU read-side critical
1228 * section.
1230 t = container_of(tb, struct task_struct, rcu_node_entry);
1231 rt_mutex_init_proxy_locked(&mtx, t);
1232 /* Avoid lockdep false positives. This rt_mutex is its own thing. */
1233 lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
1234 "rcu_boost_mutex");
1235 t->rcu_boost_mutex = &mtx;
1236 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1237 rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
1238 rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
1240 return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
1244 * Timer handler to initiate waking up of boost kthreads that
1245 * have yielded the CPU due to excessive numbers of tasks to
1246 * boost. We wake up the per-rcu_node kthread, which in turn
1247 * will wake up the booster kthread.
1249 static void rcu_boost_kthread_timer(unsigned long arg)
1251 invoke_rcu_node_kthread((struct rcu_node *)arg);
1255 * Priority-boosting kthread. One per leaf rcu_node and one for the
1256 * root rcu_node.
1258 static int rcu_boost_kthread(void *arg)
1260 struct rcu_node *rnp = (struct rcu_node *)arg;
1261 int spincnt = 0;
1262 int more2boost;
1264 trace_rcu_utilization("Start boost kthread@init");
1265 for (;;) {
1266 rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
1267 trace_rcu_utilization("End boost kthread@rcu_wait");
1268 rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
1269 trace_rcu_utilization("Start boost kthread@rcu_wait");
1270 rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
1271 more2boost = rcu_boost(rnp);
1272 if (more2boost)
1273 spincnt++;
1274 else
1275 spincnt = 0;
1276 if (spincnt > 10) {
1277 trace_rcu_utilization("End boost kthread@rcu_yield");
1278 rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
1279 trace_rcu_utilization("Start boost kthread@rcu_yield");
1280 spincnt = 0;
1283 /* NOTREACHED */
1284 trace_rcu_utilization("End boost kthread@notreached");
1285 return 0;
1289 * Check to see if it is time to start boosting RCU readers that are
1290 * blocking the current grace period, and, if so, tell the per-rcu_node
1291 * kthread to start boosting them. If there is an expedited grace
1292 * period in progress, it is always time to boost.
1294 * The caller must hold rnp->lock, which this function releases,
1295 * but irqs remain disabled. The ->boost_kthread_task is immortal,
1296 * so we don't need to worry about it going away.
1298 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1300 struct task_struct *t;
1302 if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1303 rnp->n_balk_exp_gp_tasks++;
1304 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1305 return;
1307 if (rnp->exp_tasks != NULL ||
1308 (rnp->gp_tasks != NULL &&
1309 rnp->boost_tasks == NULL &&
1310 rnp->qsmask == 0 &&
1311 ULONG_CMP_GE(jiffies, rnp->boost_time))) {
1312 if (rnp->exp_tasks == NULL)
1313 rnp->boost_tasks = rnp->gp_tasks;
1314 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1315 t = rnp->boost_kthread_task;
1316 if (t != NULL)
1317 wake_up_process(t);
1318 } else {
1319 rcu_initiate_boost_trace(rnp);
1320 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1325 * Wake up the per-CPU kthread to invoke RCU callbacks.
1327 static void invoke_rcu_callbacks_kthread(void)
1329 unsigned long flags;
1331 local_irq_save(flags);
1332 __this_cpu_write(rcu_cpu_has_work, 1);
1333 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
1334 current != __this_cpu_read(rcu_cpu_kthread_task))
1335 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1336 local_irq_restore(flags);
1340 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1341 * held, so no one should be messing with the existence of the boost
1342 * kthread.
1344 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
1345 cpumask_var_t cm)
1347 struct task_struct *t;
1349 t = rnp->boost_kthread_task;
1350 if (t != NULL)
1351 set_cpus_allowed_ptr(rnp->boost_kthread_task, cm);
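/*
 * CONFIG_RCU_BOOST_DELAY is specified in milliseconds, so convert it
 * to jiffies, rounding up so that small delays do not round to zero.
 */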
1354 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
1357 * Do priority-boost accounting for the start of a new grace period.
1359 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1361 rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1365 * Create an RCU-boost kthread for the specified node if one does not
1366 * already exist. We only create this kthread for preemptible RCU.
1367 * Returns zero if all is well, a negated errno otherwise.
1369 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1370 struct rcu_node *rnp,
1371 int rnp_index)
1373 unsigned long flags;
1374 struct sched_param sp;
1375 struct task_struct *t;
1377 if (&rcu_preempt_state != rsp)
1378 return 0;
1379 rsp->boost = 1;
1380 if (rnp->boost_kthread_task != NULL)
1381 return 0;
1382 t = kthread_create(rcu_boost_kthread, (void *)rnp,
1383 "rcub/%d", rnp_index);
1384 if (IS_ERR(t))
1385 return PTR_ERR(t);
1386 raw_spin_lock_irqsave(&rnp->lock, flags);
1387 rnp->boost_kthread_task = t;
1388 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1389 sp.sched_priority = RCU_BOOST_PRIO;
1390 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1391 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1392 return 0;
1395 #ifdef CONFIG_HOTPLUG_CPU
 1398  * Stop RCU's per-CPU kthread when its CPU goes offline.
1400 static void rcu_stop_cpu_kthread(int cpu)
1402 struct task_struct *t;
1404 /* Stop the CPU's kthread. */
1405 t = per_cpu(rcu_cpu_kthread_task, cpu);
1406 if (t != NULL) {
1407 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1408 kthread_stop(t);
1412 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1414 static void rcu_kthread_do_work(void)
1416 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1417 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1418 rcu_preempt_do_callbacks();
1422 * Wake up the specified per-rcu_node-structure kthread.
1423 * Because the per-rcu_node kthreads are immortal, we don't need
1424 * to do anything to keep them alive.
1426 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1428 struct task_struct *t;
1430 t = rnp->node_kthread_task;
1431 if (t != NULL)
1432 wake_up_process(t);
1436 * Set the specified CPU's kthread to run RT or not, as specified by
1437 * the to_rt argument. The CPU-hotplug locks are held, so the task
1438 * is not going away.
1440 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1442 int policy;
1443 struct sched_param sp;
1444 struct task_struct *t;
1446 t = per_cpu(rcu_cpu_kthread_task, cpu);
1447 if (t == NULL)
1448 return;
1449 if (to_rt) {
1450 policy = SCHED_FIFO;
1451 sp.sched_priority = RCU_KTHREAD_PRIO;
1452 } else {
1453 policy = SCHED_NORMAL;
1454 sp.sched_priority = 0;
1456 sched_setscheduler_nocheck(t, policy, &sp);
1460 * Timer handler to initiate the waking up of per-CPU kthreads that
1461 * have yielded the CPU due to excess numbers of RCU callbacks.
1462 * We wake up the per-rcu_node kthread, which in turn will wake up
1463 * the booster kthread.
1465 static void rcu_cpu_kthread_timer(unsigned long arg)
1467 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1468 struct rcu_node *rnp = rdp->mynode;
1470 atomic_or(rdp->grpmask, &rnp->wakemask);
1471 invoke_rcu_node_kthread(rnp);
1475 * Drop to non-real-time priority and yield, but only after posting a
1476 * timer that will cause us to regain our real-time priority if we
1477 * remain preempted. Either way, we restore our real-time priority
1478 * before returning.
1480 static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1482 struct sched_param sp;
1483 struct timer_list yield_timer;
1484 int prio = current->rt_priority;
1486 setup_timer_on_stack(&yield_timer, f, arg);
1487 mod_timer(&yield_timer, jiffies + 2);
1488 sp.sched_priority = 0;
1489 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1490 set_user_nice(current, 19);
1491 schedule();
1492 set_user_nice(current, 0);
1493 sp.sched_priority = prio;
1494 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1495 del_timer(&yield_timer);
1499 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1500 * This can happen while the corresponding CPU is either coming online
1501 * or going offline. We cannot wait until the CPU is fully online
1502 * before starting the kthread, because the various notifier functions
1503 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1504 * the corresponding CPU is online.
1506 * Return 1 if the kthread needs to stop, 0 otherwise.
1508 * Caller must disable bh. This function can momentarily enable it.
1510 static int rcu_cpu_kthread_should_stop(int cpu)
1512 while (cpu_is_offline(cpu) ||
1513 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1514 smp_processor_id() != cpu) {
1515 if (kthread_should_stop())
1516 return 1;
1517 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1518 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1519 local_bh_enable();
1520 schedule_timeout_uninterruptible(1);
1521 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1522 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1523 local_bh_disable();
1525 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1526 return 0;
1530 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1531 * RCU softirq used in flavors and configurations of RCU that do not
1532 * support RCU priority boosting.
1534 static int rcu_cpu_kthread(void *arg)
1536 int cpu = (int)(long)arg;
1537 unsigned long flags;
1538 int spincnt = 0;
1539 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1540 char work;
1541 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1543 trace_rcu_utilization("Start CPU kthread@init");
1544 for (;;) {
1545 *statusp = RCU_KTHREAD_WAITING;
1546 trace_rcu_utilization("End CPU kthread@rcu_wait");
1547 rcu_wait(*workp != 0 || kthread_should_stop());
1548 trace_rcu_utilization("Start CPU kthread@rcu_wait");
1549 local_bh_disable();
1550 if (rcu_cpu_kthread_should_stop(cpu)) {
1551 local_bh_enable();
1552 break;
1554 *statusp = RCU_KTHREAD_RUNNING;
1555 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1556 local_irq_save(flags);
1557 work = *workp;
1558 *workp = 0;
1559 local_irq_restore(flags);
1560 if (work)
1561 rcu_kthread_do_work();
1562 local_bh_enable();
1563 if (*workp != 0)
1564 spincnt++;
1565 else
1566 spincnt = 0;
1567 if (spincnt > 10) {
1568 *statusp = RCU_KTHREAD_YIELDING;
1569 trace_rcu_utilization("End CPU kthread@rcu_yield");
1570 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1571 trace_rcu_utilization("Start CPU kthread@rcu_yield");
1572 spincnt = 0;
1575 *statusp = RCU_KTHREAD_STOPPED;
1576 trace_rcu_utilization("End CPU kthread@term");
1577 return 0;
1581 * Spawn a per-CPU kthread, setting up affinity and priority.
1582 * Because the CPU hotplug lock is held, no other CPU will be attempting
1583 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1584 * attempting to access it during boot, but the locking in kthread_bind()
1585 * will enforce sufficient ordering.
1587 * Please note that we cannot simply refuse to wake up the per-CPU
1588 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1589 * which can result in softlockup complaints if the task ends up being
1590 * idle for more than a couple of minutes.
1592 * However, please note also that we cannot bind the per-CPU kthread to its
1593 * CPU until that CPU is fully online. We also cannot wait until the
1594 * CPU is fully online before we create its per-CPU kthread, as this would
1595 * deadlock the system when CPU notifiers tried waiting for grace
1596 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1597 * is online. If its CPU is not yet fully online, then the code in
1598 * rcu_cpu_kthread() will wait until it is fully online, and then do
1599 * the binding.
1601 static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1603 struct sched_param sp;
1604 struct task_struct *t;
1606 if (!rcu_scheduler_fully_active ||
1607 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1608 return 0;
1609 t = kthread_create_on_node(rcu_cpu_kthread,
1610 (void *)(long)cpu,
1611 cpu_to_node(cpu),
1612 "rcuc/%d", cpu);
1613 if (IS_ERR(t))
1614 return PTR_ERR(t);
1615 if (cpu_online(cpu))
1616 kthread_bind(t, cpu);
1617 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1618 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1619 sp.sched_priority = RCU_KTHREAD_PRIO;
1620 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1621 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1622 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1623 return 0;
1627 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1628 * kthreads when needed. We ignore requests to wake up kthreads
1629 * for offline CPUs, which is OK because force_quiescent_state()
1630 * takes care of this case.
1632 static int rcu_node_kthread(void *arg)
1634 int cpu;
1635 unsigned long flags;
1636 unsigned long mask;
1637 struct rcu_node *rnp = (struct rcu_node *)arg;
1638 struct sched_param sp;
1639 struct task_struct *t;
1641 for (;;) {
1642 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1643 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1644 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1645 raw_spin_lock_irqsave(&rnp->lock, flags);
1646 mask = atomic_xchg(&rnp->wakemask, 0);
1647 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1648 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1649 if ((mask & 0x1) == 0)
1650 continue;
1651 preempt_disable();
1652 t = per_cpu(rcu_cpu_kthread_task, cpu);
1653 if (!cpu_online(cpu) || t == NULL) {
1654 preempt_enable();
1655 continue;
1657 per_cpu(rcu_cpu_has_work, cpu) = 1;
1658 sp.sched_priority = RCU_KTHREAD_PRIO;
1659 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1660 preempt_enable();
1663 /* NOTREACHED */
1664 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1665 return 0;
1669 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1670 * served by the rcu_node in question. The CPU hotplug lock is still
1671 * held, so the value of rnp->qsmaskinit will be stable.
 1673  * We don't include outgoingcpu in the affinity set; use -1 if there is
1674 * no outgoing CPU. If there are no CPUs left in the affinity set,
1675 * this function allows the kthread to execute on any CPU.
1677 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1679 cpumask_var_t cm;
1680 int cpu;
1681 unsigned long mask = rnp->qsmaskinit;
1683 if (rnp->node_kthread_task == NULL)
1684 return;
1685 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1686 return;
1687 cpumask_clear(cm);
1688 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1689 if ((mask & 0x1) && cpu != outgoingcpu)
1690 cpumask_set_cpu(cpu, cm);
1691 if (cpumask_weight(cm) == 0) {
1692 cpumask_setall(cm);
1693 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1694 cpumask_clear_cpu(cpu, cm);
1695 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1697 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1698 rcu_boost_kthread_setaffinity(rnp, cm);
1699 free_cpumask_var(cm);
1703 * Spawn a per-rcu_node kthread, setting priority and affinity.
1704 * Called during boot before online/offline can happen, or, if
1705 * during runtime, with the main CPU-hotplug locks held. So only
1706 * one of these can be executing at a time.
1708 static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1709 struct rcu_node *rnp)
1711 unsigned long flags;
1712 int rnp_index = rnp - &rsp->node[0];
1713 struct sched_param sp;
1714 struct task_struct *t;
1716 if (!rcu_scheduler_fully_active ||
1717 rnp->qsmaskinit == 0)
1718 return 0;
1719 if (rnp->node_kthread_task == NULL) {
1720 t = kthread_create(rcu_node_kthread, (void *)rnp,
1721 "rcun/%d", rnp_index);
1722 if (IS_ERR(t))
1723 return PTR_ERR(t);
1724 raw_spin_lock_irqsave(&rnp->lock, flags);
1725 rnp->node_kthread_task = t;
1726 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1727 sp.sched_priority = 99;
1728 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1729 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1731 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1735 * Spawn all kthreads -- called as soon as the scheduler is running.
1737 static int __init rcu_spawn_kthreads(void)
1739 int cpu;
1740 struct rcu_node *rnp;
1742 rcu_scheduler_fully_active = 1;
1743 for_each_possible_cpu(cpu) {
1744 per_cpu(rcu_cpu_has_work, cpu) = 0;
1745 if (cpu_online(cpu))
1746 (void)rcu_spawn_one_cpu_kthread(cpu);
1748 rnp = rcu_get_root(rcu_state);
1749 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1750 if (NUM_RCU_NODES > 1) {
1751 rcu_for_each_leaf_node(rcu_state, rnp)
1752 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1754 return 0;
1756 early_initcall(rcu_spawn_kthreads);
1758 static void __cpuinit rcu_prepare_kthreads(int cpu)
1760 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1761 struct rcu_node *rnp = rdp->mynode;
1763 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1764 if (rcu_scheduler_fully_active) {
1765 (void)rcu_spawn_one_cpu_kthread(cpu);
1766 if (rnp->node_kthread_task == NULL)
1767 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1771 #else /* #ifdef CONFIG_RCU_BOOST */
1773 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1775 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1778 static void invoke_rcu_callbacks_kthread(void)
1780 WARN_ON_ONCE(1);
1783 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1787 #ifdef CONFIG_HOTPLUG_CPU
1789 static void rcu_stop_cpu_kthread(int cpu)
1793 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
1795 static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1799 static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1803 static int __init rcu_scheduler_really_started(void)
1805 rcu_scheduler_fully_active = 1;
1806 return 0;
1808 early_initcall(rcu_scheduler_really_started);
1810 static void __cpuinit rcu_prepare_kthreads(int cpu)
1814 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1816 #ifndef CONFIG_SMP
1818 void synchronize_sched_expedited(void)
1820 cond_resched();
1822 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1824 #else /* #ifndef CONFIG_SMP */
1826 static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
1827 static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
1829 static int synchronize_sched_expedited_cpu_stop(void *data)
1832 * There must be a full memory barrier on each affected CPU
1833 * between the time that try_stop_cpus() is called and the
1834 * time that it returns.
1836 * In the current initial implementation of cpu_stop, the
1837 * above condition is already met when the control reaches
1838 * this point and the following smp_mb() is not strictly
1839 * necessary. Do smp_mb() anyway for documentation and
1840 * robustness against future implementation changes.
1842 smp_mb(); /* See above comment block. */
1843 return 0;
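/*
 * Note that the stop-CPUs callback above is intentionally (almost) a
 * no-op: the context switch into and back out of the per-CPU stopper
 * task is itself the RCU-sched quiescent state that the expedited
 * grace period below relies on.
 */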
1847 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
1848 * approach to force grace period to end quickly. This consumes
1849 * significant time on all CPUs, and is thus not recommended for
1850 * any sort of common-case code.
1852 * Note that it is illegal to call this function while holding any
1853 * lock that is acquired by a CPU-hotplug notifier. Failing to
1854 * observe this restriction will result in deadlock.
1856 * This implementation can be thought of as an application of ticket
1857 * locking to RCU, with sync_sched_expedited_started and
1858 * sync_sched_expedited_done taking on the roles of the halves
1859 * of the ticket-lock word. Each task atomically increments
1860 * sync_sched_expedited_started upon entry, snapshotting the old value,
1861 * then attempts to stop all the CPUs. If this succeeds, then each
1862 * CPU will have executed a context switch, resulting in an RCU-sched
1863 * grace period. We are then done, so we use atomic_cmpxchg() to
1864 * update sync_sched_expedited_done to match our snapshot -- but
1865 * only if someone else has not already advanced past our snapshot.
1867 * On the other hand, if try_stop_cpus() fails, we check the value
1868 * of sync_sched_expedited_done. If it has advanced past our
1869 * initial snapshot, then someone else must have forced a grace period
1870 * some time after we took our snapshot. In this case, our work is
1871 * done for us, and we can simply return. Otherwise, we try again,
1872 * but keep our initial snapshot for purposes of checking for someone
1873 * doing our work for us.
1875 * If we fail too many times in a row, we fall back to synchronize_sched().
1877 void synchronize_sched_expedited(void)
1879 int firstsnap, s, snap, trycount = 0;
1881 /* Note that atomic_inc_return() implies full memory barrier. */
1882 firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
1883 get_online_cpus();
1886 * Each pass through the following loop attempts to force a
1887 * context switch on each CPU.
1889 while (try_stop_cpus(cpu_online_mask,
1890 synchronize_sched_expedited_cpu_stop,
1891 NULL) == -EAGAIN) {
1892 put_online_cpus();
1894 /* No joy, try again later. Or just synchronize_sched(). */
1895 if (trycount++ < 10)
1896 udelay(trycount * num_online_cpus());
1897 else {
1898 synchronize_sched();
1899 return;
1902 /* Check to see if someone else did our work for us. */
1903 s = atomic_read(&sync_sched_expedited_done);
1904 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
1905 smp_mb(); /* ensure test happens before caller kfree */
1906 return;
1910 * Refetching sync_sched_expedited_started allows later
1911 * callers to piggyback on our grace period. We subtract
1912 * 1 to get the same token that the last incrementer got.
1913 * We retry after they started, so our grace period works
1914 * for them, and they started after our first try, so their
1915 * grace period works for us.
1917 get_online_cpus();
1918 snap = atomic_read(&sync_sched_expedited_started);
1919 smp_mb(); /* ensure read is before try_stop_cpus(). */
1923 * Everyone up to our most recent fetch is covered by our grace
1924 * period. Update the counter, but only if our work is still
1925 * relevant -- which it won't be if someone who started later
1926 * than we did beat us to the punch.
1928 do {
1929 s = atomic_read(&sync_sched_expedited_done);
1930 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
1931 smp_mb(); /* ensure test happens before caller kfree */
1932 break;
1934 } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
1936 put_online_cpus();
1938 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
1940 #endif /* #else #ifndef CONFIG_SMP */
1942 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1945 * Check to see if any future RCU-related work will need to be done
1946 * by the current CPU, even if none need be done immediately, returning
1947 * 1 if so. This function is part of the RCU implementation; it is -not-
1948 * an exported member of the RCU API.
1950 * Because we have preemptible RCU, just check whether this CPU needs
1951 * any flavor of RCU. Do not chew up lots of CPU cycles with preemption
1952 * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
1954 int rcu_needs_cpu(int cpu)
1956 return rcu_needs_cpu_quick_check(cpu);
1959 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1961 #define RCU_NEEDS_CPU_FLUSHES 5
1962 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
1963 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
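/*
 * rcu_dyntick_drain counts down the rcu_needs_cpu() passes remaining
 * in the current attempt to push this CPU's callbacks through, and
 * rcu_dyntick_holdoff records the jiffy at which the last attempt gave
 * up, preventing further attempts during that same jiffy.
 */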
1966 * Check to see if any future RCU-related work will need to be done
1967 * by the current CPU, even if none need be done immediately, returning
1968 * 1 if so. This function is part of the RCU implementation; it is -not-
1969 * an exported member of the RCU API.
1971 * Because we are not supporting preemptible RCU, attempt to accelerate
1972 * any current grace periods so that RCU no longer needs this CPU, but
1973 * only if all other CPUs are already in dynticks-idle mode. This will
1974 * allow the CPU cores to be powered down immediately, as opposed to after
1975 * waiting many milliseconds for grace periods to elapse.
1977 * Because it is not legal to invoke rcu_process_callbacks() with irqs
 1978  * disabled, we do one pass of force_quiescent_state(), then do an
1979 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1980 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1982 int rcu_needs_cpu(int cpu)
1984 int c = 0;
1985 int snap;
1986 int thatcpu;
1988 /* Check for being in the holdoff period. */
1989 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
1990 return rcu_needs_cpu_quick_check(cpu);
1992 /* Don't bother unless we are the last non-dyntick-idle CPU. */
1993 for_each_online_cpu(thatcpu) {
1994 if (thatcpu == cpu)
1995 continue;
1996 snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
1997 thatcpu).dynticks);
1998 smp_mb(); /* Order sampling of snap with end of grace period. */
1999 if ((snap & 0x1) != 0) {
2000 per_cpu(rcu_dyntick_drain, cpu) = 0;
2001 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
2002 return rcu_needs_cpu_quick_check(cpu);
2006 /* Check and update the rcu_dyntick_drain sequencing. */
2007 if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
2008 /* First time through, initialize the counter. */
2009 per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
2010 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
2011 /* We have hit the limit, so time to give up. */
2012 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
2013 return rcu_needs_cpu_quick_check(cpu);
2016 /* Do one step pushing remaining RCU callbacks through. */
2017 if (per_cpu(rcu_sched_data, cpu).nxtlist) {
2018 rcu_sched_qs(cpu);
2019 force_quiescent_state(&rcu_sched_state, 0);
2020 c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
2022 if (per_cpu(rcu_bh_data, cpu).nxtlist) {
2023 rcu_bh_qs(cpu);
2024 force_quiescent_state(&rcu_bh_state, 0);
2025 c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
2028 /* If RCU callbacks are still pending, RCU still needs this CPU. */
2029 if (c)
2030 invoke_rcu_core();
2031 return c;
2034 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */