kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = SPIN_LOCK_UNLOCKED,
        .cpumask = CPU_MASK_NONE,
};
struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = SPIN_LOCK_UNLOCKED,
        .cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int blimit = 10;         /* maximum callbacks invoked per batch */
static int qhimark = 10000;     /* queue length at which the batch limit is lifted */
static int qlowmark = 100;      /* queue length at which the batch limit is restored */
#ifdef CONFIG_SMP
static int rsinterval = 1000;   /* callbacks queued between forced reschedule IPIs */
#endif

static atomic_t rcu_barrier_cpu_count;
static struct semaphore rcu_barrier_sema;
static struct completion rcu_barrier_completion;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;
        set_need_resched();
        if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
                rdp->last_rs_qlen = rdp->qlen;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 */
                cpumask = rcp->cpumask;
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
        local_irq_restore(flags);
}
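
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a typical call_rcu() user embeds a struct rcu_head in its own object,
 * unlinks the object from an RCU-protected list under the update-side
 * lock, and frees it from the callback once a grace period has elapsed.
 * "struct foo", foo_reclaim() and foo_remove() are hypothetical names
 * used only for this example; kept under #if 0 so it is never built.
 */
#if 0
struct foo {
        struct list_head list;
        struct rcu_head rcu;
        int data;
};

static void foo_reclaim(struct rcu_head *head)
{
        struct foo *fp = container_of(head, struct foo, rcu);

        kfree(fp);
}

static void foo_remove(struct foo *fp)
{
        list_del_rcu(&fp->list);                /* caller holds the update-side lock */
        call_rcu(&fp->rcu, foo_reclaim);        /* reclaim after a grace period */
}
#endif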

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_bh_ctrlblk);
        }

        local_irq_restore(flags);
}
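
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * pairing a process-context reader with a call_rcu_bh() update.  struct bar,
 * bar_global, bar_use() and bar_free_rcu() are hypothetical names used only
 * for this example; kept under #if 0 so it is never built.
 */
#if 0
static void bar_reader(void)
{
        struct bar *b;

        rcu_read_lock_bh();                     /* read-side section for call_rcu_bh() */
        b = rcu_dereference(bar_global);
        if (b)
                bar_use(b);
        rcu_read_unlock_bh();
}

static void bar_replace(struct bar *new)
{
        struct bar *old = bar_global;

        rcu_assign_pointer(bar_global, new);
        call_rcu_bh(&old->rcu, bar_free_rcu);   /* reclaim after a softirq grace period */
}
#endif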

/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
        int cpu = smp_processor_id();
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_head *head;

        head = &rdp->barrier;
        atomic_inc(&rcu_barrier_cpu_count);
        call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all in-flight RCU callbacks have completed.
 */
void rcu_barrier(void)
{
        BUG_ON(in_interrupt());
        /* Take cpucontrol semaphore to protect against CPU hotplug */
        down(&rcu_barrier_sema);
        init_completion(&rcu_barrier_completion);
        atomic_set(&rcu_barrier_cpu_count, 0);
        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
        wait_for_completion(&rcu_barrier_completion);
        up(&rcu_barrier_sema);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
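
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a module that queues callbacks with call_rcu() must wait for them to
 * finish before it can be safely unloaded.  mymod_exit() and
 * mymod_stop_updates() are hypothetical names; kept under #if 0.
 */
#if 0
static void __exit mymod_exit(void)
{
        mymod_stop_updates();   /* ensure no new call_rcu() callbacks are queued */
        rcu_barrier();          /* wait for all previously queued callbacks */
        /* now it is safe to free module data structures and unload */
}
#endif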

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = rdp->donelist = list->next;
                list->func(list);
                list = next;
                rdp->qlen--;
                if (++count >= rdp->blimit)
                        break;
        }
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;
        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state then calls (via cpu_quiet()) rcu_start_batch()
 *   to start the next grace period (if necessary).
 */

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->next_pending &&
                        rcp->completed == rcp->cur) {
                rcp->next_pending = 0;
                /*
                 * next_pending == 0 must be visible in
                 * __rcu_process_callbacks() before it can see new value of cur.
                 */
                smp_wmb();
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed ! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock(&rcp->lock);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock(&rcp->lock);
}


#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * locking requirements; the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail)
{
        local_irq_disable();
        *this_rdp->nxttail = list;
        if (list)
                this_rdp->nxttail = tail;
        local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* if the cpu going offline owns the grace period
         * we can block indefinitely waiting for it, so flush
         * it here
         */
        spin_lock_bh(&rcp->lock);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                                        &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                                        &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
        tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
                *rdp->donetail = rdp->curlist;
                rdp->donetail = rdp->curtail;
                rdp->curlist = NULL;
                rdp->curtail = &rdp->curlist;
        }

        local_irq_disable();
        if (rdp->nxtlist && !rdp->curlist) {
                rdp->curlist = rdp->nxtlist;
                rdp->curtail = rdp->nxttail;
                rdp->nxtlist = NULL;
                rdp->nxttail = &rdp->nxtlist;
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */

                /* determine batch number */
                rdp->batch = rcp->cur + 1;
                /* see the comment and corresponding wmb() in
                 * the rcu_start_batch()
                 */
                smp_rmb();

                if (!rcp->next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rcp->lock);
                        rcp->next_pending = 1;
                        rcu_start_batch(rcp);
                        spin_unlock(&rcp->lock);
                }
        } else {
                local_irq_enable();
        }
        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* This cpu has pending rcu entries and the grace period
         * for them has completed.
         */
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
                return 1;

        /* This cpu has no pending entries, but there are new entries */
        if (!rdp->curlist && rdp->nxtlist)
                return 1;

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);
        } else if (!in_softirq())
                rcu_bh_qsctr_inc(cpu);
        tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        memset(rdp, 0, sizeof(*rdp));
        rdp->curtail = &rdp->curlist;
        rdp->nxttail = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
}

static void __devinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __devinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffies timer (UP) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
        sema_init(&rcu_barrier_sema, 1);
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished */
        call_rcu(&rcu.head, wakeme_after_rcu);

        /* Wait for it */
        wait_for_completion(&rcu.completion);
}
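
/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the classic synchronize_rcu() update pattern - publish a new version of
 * an RCU-protected pointer, wait for a grace period, then free the old
 * version.  struct foo, gbl_foo, gbl_foo_lock and foo_update() are
 * hypothetical names used only for this example; kept under #if 0.
 */
#if 0
static void foo_update(struct foo *new)
{
        struct foo *old;

        spin_lock(&gbl_foo_lock);
        old = gbl_foo;
        rcu_assign_pointer(gbl_foo, new);       /* readers now see the new version */
        spin_unlock(&gbl_foo_lock);

        synchronize_rcu();                      /* wait for pre-existing readers */
        kfree(old);                             /* no reader can still hold 'old' */
}
#endif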

/*
 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
 */
void synchronize_kernel(void)
{
        synchronize_rcu();
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
#ifdef CONFIG_SMP
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL(call_rcu);  /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL(call_rcu_bh);  /* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL(synchronize_rcu);
EXPORT_SYMBOL(synchronize_kernel);  /* WARNING: GPL-only in April 2006. */