/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = SPIN_LOCK_UNLOCKED,
	.cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = SPIN_LOCK_UNLOCKED,
	.cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;
#ifdef CONFIG_SMP
static int rsinterval = 1000;
#endif

static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;
	set_need_resched();
	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
		rdp->last_rs_qlen = rdp->qlen;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
	local_irq_restore(flags);
}
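
/*
 * Usage sketch for call_rcu() (illustrative only; struct foo, foo_lock
 * and foo_reclaim() below are hypothetical and not part of this file).
 * The updater unlinks an element, then hands reclamation to RCU so
 * that it happens only after all current readers are done:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	void foo_del(struct foo *p)
 *	{
 *		spin_lock(&foo_lock);
 *		list_del_rcu(&p->list);
 *		spin_unlock(&foo_lock);
 *		call_rcu(&p->rcu, foo_reclaim);
 *	}
 */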
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_bh_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
	}

	local_irq_restore(flags);
}
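
/*
 * Reader-side sketch matching call_rcu_bh() (illustrative only;
 * foo_find() and its result are hypothetical):
 *
 *	rcu_read_lock_bh();
 *	p = foo_find(key);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock_bh();
 *
 * Updaters hand off reclamation exactly as with call_rcu(), but via
 * call_rcu_bh(), so the grace period only has to wait for softirq
 * handlers and rcu_read_lock_bh() sections to complete.
 */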
/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
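
/*
 * Statistics sketch using rcu_batches_completed() (illustrative; the
 * printk is hypothetical debugging code, not part of this file):
 *
 *	printk(KERN_DEBUG "RCU batches completed: %ld\n",
 *	       rcu_batches_completed());
 */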
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_head *head;

	head = &rdp->barrier;
	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}
/**
 * rcu_barrier - Wait until all in-flight RCU callbacks have completed.
 */
void rcu_barrier(void)
{
	BUG_ON(in_interrupt());
	/* Take cpucontrol mutex to protect against CPU hotplug */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	atomic_set(&rcu_barrier_cpu_count, 0);
	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
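
/*
 * Typical use of rcu_barrier() (illustrative sketch; my_module_exit()
 * and my_cache are hypothetical): a module that queues callbacks with
 * call_rcu() must wait for them all to run before it can be unloaded,
 * since the callback code vanishes with the module:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		...
 *		rcu_barrier();
 *		kmem_cache_destroy(my_cache);
 *	}
 */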
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = rdp->donelist = list->next;
		list->func(list);
		list = next;
		rdp->qlen--;
		if (++count >= rdp->blimit)
			break;
	}
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;
	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick it up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the
 *   beginning of the grace period. If so, they update rcu_ctrlblk.cpumask.
 *   If the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch to start the next
 *   grace period (if necessary).
 */
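
/*
 * A concrete two-cpu trace of the above (illustrative; the batch
 * numbers are arbitrary):
 *
 *	rcu_start_batch:	rcp->cur = 5, cpumask = {cpu0, cpu1}
 *	cpu0, 1st call:		quiescbatch 4 != cur 5 -> qs_pending = 1
 *	cpu0 context-switches:	passed_quiesc = 1
 *	cpu0, 2nd call:		cpu_quiet clears cpu0, cpumask = {cpu1}
 *	cpu1, same sequence:	cpumask empty -> completed = 5, and
 *				rcu_start_batch may begin batch 6
 */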
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/*
		 * next_pending == 0 must be visible in
		 * __rcu_process_callbacks() before it can see new value of cur.
		 */
		smp_wmb();
		rcp->cur++;

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier.  Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
	}
}
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
}
#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * the locking requirements: the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}
static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif
/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
/*
 * Called from the timer interrupt.  If the cpu was interrupted from
 * user mode or from the idle loop (and this is not a nested interrupt
 * or softirq), it is in a quiescent state for both rcu and rcu_bh;
 * if merely outside of softirq, only for rcu_bh.  Then kick the
 * per-cpu tasklet to process callbacks.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}
static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
}

static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
static int __devinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes the rcu mechanism.  Assumed to be called early, before
 * the local timer (SMP) or the jiffies timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me up after RCU finishes */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}
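
/*
 * Writer-side sketch for synchronize_rcu() (illustrative only; the
 * foo names are hypothetical).  Unlike call_rcu(), this blocks, so it
 * may only be used from process context:
 *
 *	spin_lock(&foo_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(p);
 *
 * After synchronize_rcu() returns, no reader can still hold a
 * reference to p, so it is safe to free.
 */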
/*
 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
 */
void synchronize_kernel(void)
{
	synchronize_rcu();
}
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
#ifdef CONFIG_SMP
module_param(rsinterval, int, 0);
#endif
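
/*
 * Tuning sketch (illustrative; the values shown are hypothetical).
 * Assuming this file is built into the kernel, as is usual, the
 * parameters above are set on the boot command line with the
 * "rcupdate." prefix, e.g.:
 *
 *	rcupdate.blimit=20 rcupdate.qhimark=20000 rcupdate.qlowmark=200
 */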
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL_FUTURE(call_rcu);	/* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh);	/* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL(synchronize_rcu);
EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel);	/* WARNING: GPL-only in April 2006. */