kernel/rcupdate.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
	{ .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
	  .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
#define RCU_tasklet(cpu) (per_cpu(rcu_tasklet, cpu))
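
/*
 * For reference: a sketch of the per-CPU state that the RCU_* accessor
 * macros used throughout this file wrap.  The authoritative definitions
 * live in <linux/rcupdate.h>; the exact layout below is an assumption
 * reconstructed from the macro names used here.
 */
#if 0
struct rcu_data {
	long		qsctr;		/* quiescent states this CPU has
					 * passed through (user mode, idle,
					 * context switch) */
	long		last_qsctr;	/* qsctr snapshot taken at the start
					 * of the current grace period */
	long		batch;		/* batch number this CPU's curlist
					 * is waiting on */
	struct list_head nxtlist;	/* callbacks queued by call_rcu() */
	struct list_head curlist;	/* callbacks in the current batch */
};
#define RCU_qsctr(cpu)		(per_cpu(rcu_data, (cpu)).qsctr)
#define RCU_last_qsctr(cpu)	(per_cpu(rcu_data, (cpu)).last_qsctr)
#define RCU_batch(cpu)		(per_cpu(rcu_data, (cpu)).batch)
#define RCU_nxtlist(cpu)	(per_cpu(rcu_data, (cpu)).nxtlist)
#define RCU_curlist(cpu)	(per_cpu(rcu_data, (cpu)).curlist)
#endif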
/**
 * call_rcu - Queue an RCU update request.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 * @arg: argument to be passed to the update function
 *
 * The update function will be invoked as soon as all CPUs have performed
 * a context switch or been seen in the idle loop or in a user process.
 * The read side of any critical section that uses call_rcu() for updates
 * must be protected by rcu_read_lock()/rcu_read_unlock().
 */
void call_rcu(struct rcu_head *head, void (*func)(void *arg), void *arg)
{
	int cpu;
	unsigned long flags;

	head->func = func;
	head->arg = arg;
	local_irq_save(flags);
	cpu = smp_processor_id();
	list_add_tail(&head->list, &RCU_nxtlist(cpu));
	local_irq_restore(flags);
}
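
/*
 * Usage sketch (illustrative only; the foo_* names are hypothetical):
 * a writer unlinks an element so no new readers can find it, then lets
 * call_rcu() reclaim it once all pre-existing readers are done.
 */
#if 0
struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(void *arg)
{
	kfree((struct foo *)arg);	/* runs after the grace period */
}

static void foo_del(struct foo *p)
{
	list_del(&p->list);		/* writers serialize externally */
	call_rcu(&p->rcu, foo_reclaim, p);
}
#endif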
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct list_head *list)
{
	struct list_head *entry;
	struct rcu_head *head;

	while (!list_empty(list)) {
		entry = list->next;
		list_del(entry);
		head = list_entry(entry, struct rcu_head, list);
		head->func(head->arg);
	}
}
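
/*
 * Note on list_entry() above: callbacks are linked through the list_head
 * embedded in struct rcu_head, so the containing rcu_head is recovered by
 * subtracting the member offset.  A minimal sketch of the idea:
 */
#if 0
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#endif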
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold the rcu_ctrlblk lock.
 */
static void rcu_start_batch(long newbatch)
{
	if (rcu_batch_before(rcu_ctrlblk.maxbatch, newbatch)) {
		rcu_ctrlblk.maxbatch = newbatch;
	}
	if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
	    !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
		return;
	}
	rcu_ctrlblk.rcu_cpu_mask = cpu_online_map;
}
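
/*
 * Batch numbers are free-running longs, so "before"/"after" must use a
 * wraparound-safe signed difference rather than comparing raw values.
 * A sketch of what rcu_batch_before()/rcu_batch_after() (defined in
 * <linux/rcupdate.h>) are assumed to look like:
 */
#if 0
#define rcu_batch_before(a, b)	(((a) - (b)) < 0)
#define rcu_batch_after(a, b)	(((a) - (b)) > 0)
#endif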
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this grace
 * period, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(void)
{
	int cpu = smp_processor_id();

	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
		return;

	/*
	 * Races with local timer interrupt - in the worst case
	 * we may miss one quiescent state of that CPU. That is
	 * tolerable. So no need to disable interrupts.
	 */
	if (RCU_last_qsctr(cpu) == RCU_QSCTR_INVALID) {
		RCU_last_qsctr(cpu) = RCU_qsctr(cpu);
		return;
	}
	if (RCU_qsctr(cpu) == RCU_last_qsctr(cpu))
		return;

	spin_lock(&rcu_ctrlblk.mutex);
	if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
		goto out_unlock;

	cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
	RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
	if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
		goto out_unlock;

	rcu_ctrlblk.curbatch++;
	rcu_start_batch(rcu_ctrlblk.maxbatch);

out_unlock:
	spin_unlock(&rcu_ctrlblk.mutex);
}
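
/*
 * The quiescent-state counter itself is advanced outside this file; a
 * context switch is the canonical quiescent state.  Sketch of the hook
 * assumed in the scheduler (kernel/sched.c) for this kernel vintage:
 */
#if 0
	/* in schedule(), once prev has been switched out: */
	RCU_qsctr(task_cpu(prev))++;
#endif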
/*
 * This does the RCU processing work from tasklet context.
 */
static void rcu_process_callbacks(unsigned long unused)
{
	int cpu = smp_processor_id();
	LIST_HEAD(list);

	if (!list_empty(&RCU_curlist(cpu)) &&
	    rcu_batch_after(rcu_ctrlblk.curbatch, RCU_batch(cpu))) {
		list_splice(&RCU_curlist(cpu), &list);
		INIT_LIST_HEAD(&RCU_curlist(cpu));
	}

	local_irq_disable();
	if (!list_empty(&RCU_nxtlist(cpu)) && list_empty(&RCU_curlist(cpu))) {
		list_splice(&RCU_nxtlist(cpu), &RCU_curlist(cpu));
		INIT_LIST_HEAD(&RCU_nxtlist(cpu));
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */
		spin_lock(&rcu_ctrlblk.mutex);
		RCU_batch(cpu) = rcu_ctrlblk.curbatch + 1;
		rcu_start_batch(RCU_batch(cpu));
		spin_unlock(&rcu_ctrlblk.mutex);
	} else {
		local_irq_enable();
	}
	rcu_check_quiescent_state();
	if (!list_empty(&list))
		rcu_do_batch(&list);
}
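
/*
 * Summary of the per-CPU callback pipeline implemented above:
 *
 *	call_rcu() --> nxtlist
 *	nxtlist --(tasklet assigns RCU_batch = curbatch + 1)--> curlist
 *	curlist --(curbatch advances past RCU_batch)--> local list
 *	local list --> rcu_do_batch() invokes head->func(head->arg)
 */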
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
	     hardirq_count() <= (1 << HARDIRQ_SHIFT)))
		RCU_qsctr(cpu)++;
	tasklet_schedule(&RCU_tasklet(cpu));
}
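
/*
 * rcu_check_callbacks() is driven from the per-CPU timer tick: a tick
 * taken in user mode, or on an idle CPU where only the timer hardirq is
 * active (hardirq_count() at a single HARDIRQ_SHIFT level) and no
 * softirq is running, counts as a quiescent state.  Sketch of the
 * assumed call site (update_process_times() in kernel/timer.c):
 */
#if 0
	/* in update_process_times(int user_tick): */
	rcu_check_callbacks(smp_processor_id(), user_tick);
#endif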
static void __devinit rcu_online_cpu(int cpu)
{
	memset(&per_cpu(rcu_data, cpu), 0, sizeof(struct rcu_data));
	tasklet_init(&RCU_tasklet(cpu), rcu_process_callbacks, 0UL);
	INIT_LIST_HEAD(&RCU_nxtlist(cpu));
	INIT_LIST_HEAD(&RCU_curlist(cpu));
}
static int __devinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		rcu_online_cpu(cpu);
		break;
	/* Space reserved for CPU_OFFLINE :) */
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes the rcu mechanism.  Assumed to be called early, that is
 * before the local timer (SMP) or jiffie timer (uniprocessor) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_QSCTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
/* Because of the FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(void *completion)
{
	complete(completion);
}

/**
 * synchronize_kernel - wait until all the CPUs have gone
 * through a "quiescent" state. It may sleep.
 */
void synchronize_kernel(void)
{
	struct rcu_head rcu;
	DECLARE_COMPLETION(completion);

	/* Will wake me after RCU finished */
	call_rcu(&rcu, wakeme_after_rcu, &completion);

	/* Wait for it */
	wait_for_completion(&completion);
}
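
/*
 * Usage sketch (illustrative; the foo_* names are hypothetical): the
 * synchronous counterpart to the call_rcu() example above.  The writer
 * blocks until the grace period completes, then frees directly.
 */
#if 0
static void foo_del_sync(struct foo *p)
{
	list_del(&p->list);	/* no new readers can find p */
	synchronize_kernel();	/* wait out all pre-existing readers */
	kfree(p);
}
#endif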
EXPORT_SYMBOL(call_rcu);
EXPORT_SYMBOL(synchronize_kernel);