/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
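
/*
 * Example usage (a minimal sketch; "struct foo" and foo_reclaim() are
 * hypothetical names, not part of this header): embed the rcu_head in
 * the RCU-protected structure and recover the enclosing object with
 * container_of() in the callback.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 * Then, once fp has been unlinked from all reader-visible structures:
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 */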
/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number.                      */
	long	completed;	/* Number of the last completed batch         */
	int	next_pending;	/* Is the next batch already waiting?         */

	int	signaled;

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask; /* CPUs that need to switch in order    */
				 /* for current batch to proceed.        */
} ____cacheline_internodealigned_in_smp;
/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}
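
/*
 * Worked example (not from the original header): comparing via the
 * sign of (a - b) keeps these helpers correct across counter
 * wraparound.  With 2's-complement arithmetic, if a == LONG_MAX and
 * b has wrapped around to LONG_MIN (i.e. b is one batch newer), then
 * a - b == -1, so rcu_batch_before(a, b) still returns 1, whereas a
 * naive "a < b" would wrongly return 0.
 */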
/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	 /* Batch # for grace period */
	int		passed_quiesc;	 /* User-mode/idle loop etc. */
	int		qs_pending;	 /* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		 /* Batch # for current RCU batch */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail;
	long		qlen;		 /* # of queued callbacks */
	struct rcu_head *curlist;
	struct rcu_head **curtail;
	struct rcu_head *donelist;
	struct rcu_head **donetail;
	long		blimit;		 /* Upper limit on a processed batch */
	int		cpu;
	struct rcu_head barrier;
};
DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
/*
 * Increment the quiescent state counter.
 * The counter is a bit degenerate: we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period. Thus just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}
extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif
/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() \
	do { \
		preempt_disable(); \
		__acquire(RCU); \
		rcu_read_acquire(); \
	} while (0)
/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock() \
	do { \
		rcu_read_release(); \
		__release(RCU); \
		preempt_enable(); \
	} while (0)
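
/*
 * Example usage (a minimal sketch; the global pointer "gp" and
 * "struct foo" are hypothetical):
 *
 *	struct foo *fp;
 *	int data;
 *
 *	rcu_read_lock();
 *	fp = rcu_dereference(gp);
 *	data = fp ? fp->data : -1;	(no blocking in here)
 *	rcu_read_unlock();
 *
 * fp must not be dereferenced after rcu_read_unlock(): once the
 * read-side critical section ends, a grace period may elapse and the
 * object may be freed at any time thereafter.
 */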
/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.  A sketch of a typical updater
 * follows below.
 */
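
/*
 * Example update (a minimal sketch with error handling omitted;
 * "foo_lock", "gp" and "struct foo" are hypothetical): writers
 * serialize against each other with a spinlock, then wait out a grace
 * period before freeing the old version.
 *
 *	struct foo *new_fp, *old_fp;
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	old_fp = gp;
 *	*new_fp = *old_fp;
 *	new_fp->data = 42;
 *	rcu_assign_pointer(gp, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 */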
/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() \
	do { \
		local_bh_disable(); \
		__acquire(RCU_BH); \
		rcu_read_acquire(); \
	} while (0)
/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() \
	do { \
		rcu_read_release(); \
		__release(RCU_BH); \
		local_bh_enable(); \
	} while (0)
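
/*
 * Example usage (a minimal sketch; foo_find() and "foo_table" are
 * hypothetical): a process-context lookup in a table whose entries
 * are reclaimed via call_rcu_bh(), e.g. because it is also updated
 * from softirq context.
 *
 *	rcu_read_lock_bh();
 *	fp = foo_find(foo_table, key);
 *	if (fp)
 *		data = fp->data;	(must not block)
 *	rcu_read_unlock_bh();
 */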
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
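
/*
 * Example (a minimal sketch; "gp" and do_something() are
 * hypothetical): fetch the pointer once and work on the snapshot.
 *
 *	p = rcu_dereference(gp);
 *	if (p != NULL && p->data != 0)
 *		do_something(p->data);
 *
 * By contrast, testing and using "gp" directly is broken, since each
 * access may fetch a different object if an updater runs concurrently:
 *
 *	if (gp != NULL && gp->data != 0)	(WRONG)
 *		do_something(gp->data);
 */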
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v)	({ \
						smp_wmb(); \
						(p) = (v); \
					})
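
/*
 * Example usage (a minimal sketch; "gp" and "struct foo" are
 * hypothetical): initialize the structure completely before
 * publishing it, so that readers who see the new pointer are
 * guaranteed to also see the initialized fields.
 *
 *	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);
 *
 *	fp->data = 42;			(initialization first)
 *	rcu_assign_pointer(gp, fp);	(publication last)
 */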
/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()
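
/*
 * Example usage (a minimal sketch; "gp" and use() are hypothetical):
 * waiting out readers that rely on preempt_disable() rather than on
 * rcu_read_lock().
 *
 * Reader:
 *	preempt_disable();
 *	p = gp;
 *	use(p);
 *	preempt_enable();
 *
 * Updater:
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	synchronize_sched();	(all such readers have finished)
 *	kfree(old);
 */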
extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *head)));
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
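
/*
 * Example usage (a minimal sketch; foo_reclaim(), "foo_list_lock" and
 * the list linkage are hypothetical): call_rcu() queues deferred
 * reclamation without blocking, and rcu_barrier() lets module-exit
 * code wait until all such callbacks have run.
 *
 *	spin_lock(&foo_list_lock);
 *	list_del_rcu(&fp->list);
 *	spin_unlock(&foo_list_lock);
 *	call_rcu(&fp->rcu, foo_reclaim);	(returns immediately)
 *
 *	static void __exit foo_exit(void)
 *	{
 *		...
 *		rcu_barrier();	(wait for pending foo_reclaim() calls)
 *	}
 */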
#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */