/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.lock	= __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.lock	= __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_MASK_NONE,
};
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;

	set_need_resched();
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
	local_irq_restore(flags);
}
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_bh_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
	}

	local_irq_restore(flags);
}
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_ctrlblk.completed;
}
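
/*
 * Illustrative sketch (hypothetical): a debugging aid can snapshot the
 * batch counter and later report how many grace periods have elapsed.
 *
 *	long snap = rcu_batches_completed();
 *	...
 *	printk(KERN_DEBUG "RCU batches since snapshot: %ld\n",
 *	       rcu_batches_completed() - snap);
 */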
static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_head *head;

	head = &rdp->barrier;
	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}
/**
 * rcu_barrier - Wait until all the in-flight RCUs are complete.
 */
void rcu_barrier(void)
{
	BUG_ON(in_interrupt());
	/* Take cpucontrol mutex to protect against CPU hotplug */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	atomic_set(&rcu_barrier_cpu_count, 0);
	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = list->next;
		prefetch(next);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}
	rdp->donelist = list;

	local_irq_disable();
	rdp->qlen -= count;
	local_irq_enable();
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick it up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch to start the next grace
 *   period (if necessary).
 */
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/*
		 * next_pending == 0 must be visible in
		 * __rcu_process_callbacks() before it can see new value of cur.
		 */
		smp_wmb();
		rcp->cur++;

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier.  Otherwise it can cause tickless idle CPUs to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

		rcp->signaled = 0;
	}
}
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed ! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it has not already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
}
#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu(). Do not use elsewhere without
 * reviewing the locking requirements; the list it pulls from has to belong
 * to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}
static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* If the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here.
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}
static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}
#else

static void rcu_offline_cpu(int cpu)
{
}

#endif
/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}
static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}
static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
}
static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes the RCU mechanism.  Assumed to be called early, before the
 * local timer (SMP) or the jiffies timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of `0' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};
/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);