/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update requests in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)
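
/*
 * Typical usage embeds a struct rcu_head in the RCU-protected structure
 * so that it can later be handed to call_rcu().  A minimal sketch,
 * assuming a hypothetical struct foo that is not part of this header:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 */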

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	long	cur;		/* Current batch number. */
	long	completed;	/* Number of the last completed batch */
	int	next_pending;	/* Is the next batch already waiting? */

	spinlock_t	lock	____cacheline_internodealigned_in_smp;
	cpumask_t	cpumask;	/* CPUs that need to switch in order */
					/* for current batch to proceed. */
} ____cacheline_internodealigned_in_smp;

/* Is batch a before batch b ? */
static inline int rcu_batch_before(long a, long b)
{
	/* Signed difference handles batch-counter wraparound. */
	return (a - b) < 0;
}

/* Is batch a after batch b ? */
static inline int rcu_batch_after(long a, long b)
{
	return (a - b) > 0;
}

/*
 * Per-CPU data for Read-Copy Update.
 * nxtlist - new callbacks are added here
 * curlist - current batch for which quiescent cycle started if any
 */
struct rcu_data {
	/* 1) quiescent state handling : */
	long		quiescbatch;	/* Batch # for grace period */
	int		passed_quiesc;	/* User-mode/idle loop etc. */
	int		qs_pending;	/* core waits for quiesc state */

	/* 2) batch handling */
	long		batch;		/* Batch # for current RCU batch */
	struct rcu_head	*nxtlist;
	struct rcu_head	**nxttail;	/* tail of nxtlist, for O(1) append */
	long		qlen;		/* # of queued callbacks */
	struct rcu_head	*curlist;
	struct rcu_head	**curtail;
	struct rcu_head	*donelist;	/* callbacks ready to invoke */
	struct rcu_head	**donetail;
	long		blimit;		/* Upper limit on a processed batch */
	struct rcu_head	barrier;
	long		last_rs_qlen;	/* qlen during the last resched */
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is somewhat degenerate: we do not need to know
 * how many quiescent states have passed, just whether there was
 * at least one since the start of the grace period.  Thus it is
 * just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
}

extern int rcu_pending(int cpu);

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock()		preempt_disable()

/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
#define rcu_read_unlock()	preempt_enable()
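
/*
 * A minimal reader-side sketch, reusing the hypothetical struct foo
 * from above; gbl_foo and foo_get_a() are likewise illustrative only:
 *
 *	static struct foo *gbl_foo;
 *
 *	int foo_get_a(void)
 *	{
 *		struct foo *fp;
 *		int retval;
 *
 *		rcu_read_lock();
 *		fp = rcu_dereference(gbl_foo);
 *		retval = fp->a;
 *		rcu_read_unlock();
 *		return retval;
 *	}
 */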

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
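
/*
 * A sketch of writer-side coordination with a spinlock, continuing the
 * hypothetical struct foo example (foo_lock and foo_update_a() are not
 * part of this header):
 *
 *	static DEFINE_SPINLOCK(foo_lock);
 *
 *	void foo_update_a(int new_a)
 *	{
 *		struct foo *new_fp;
 *		struct foo *old_fp;
 *
 *		new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *		spin_lock(&foo_lock);
 *		old_fp = gbl_foo;
 *		*new_fp = *old_fp;
 *		new_fp->a = new_a;
 *		rcu_assign_pointer(gbl_foo, new_fp);
 *		spin_unlock(&foo_lock);
 *		synchronize_rcu();
 *		kfree(old_fp);
 *	}
 */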

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh().  Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs.  Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh()	local_bh_disable()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh()	local_bh_enable()
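
/*
 * A brief sketch of a _bh reader; the pointer and helper names are
 * hypothetical:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference(bh_protected_ptr);
 *	if (p)
 *		process(p);
 *	rcu_read_unlock_bh();
 */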

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = p; \
				smp_read_barrier_depends(); \
				(_________p1); \
				})
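
/*
 * Fetch the pointer once via rcu_dereference() and use the local copy;
 * re-reading the global pointer may observe a different element.  A
 * sketch, with hypothetical names:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(head);
 *	if (p)
 *		val = p->field;
 *	rcu_read_unlock();
 */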

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)	({ \
						smp_wmb(); \
						(p) = (v); \
					})
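
/*
 * The publish pattern in miniature: fully initialize the structure,
 * then make it visible.  "q" and "global_p" are hypothetical:
 *
 *	q = kmalloc(sizeof(*q), GFP_KERNEL);
 *	q->a = 1;
 *	q->b = 2;
 *	rcu_assign_pointer(global_p, q);
 *
 * Readers that use rcu_dereference(global_p) are then guaranteed to
 * see the assignments to q->a and q->b.
 */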

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns.  However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (deprecated)
 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() synchronize_rcu()
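
/*
 * A sketch of relying on synchronize_sched() to wait out readers that
 * merely disable preemption (all names are hypothetical):
 *
 *	Reader:
 *		preempt_disable();
 *		p = global_p;
 *		use(p);
 *		preempt_enable();
 *
 *	Updater:
 *		old = global_p;
 *		global_p = new;
 *		synchronize_sched();
 *		kfree(old);
 */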

extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);

/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
		void (*func)(struct rcu_head *head)));
extern void FASTCALL(call_rcu_bh(struct rcu_head *head,
		void (*func)(struct rcu_head *head)));
extern __deprecated_for_modules void synchronize_kernel(void);
extern void synchronize_rcu(void);
void synchronize_idle(void);
extern void rcu_barrier(void);
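
/*
 * A call_rcu() variant of the update sketch above: instead of blocking
 * in synchronize_rcu(), queue a callback to free the old element after
 * a grace period.  foo_reclaim() is hypothetical:
 *
 *	static void foo_reclaim(struct rcu_head *rp)
 *	{
 *		struct foo *fp = container_of(rp, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	...
 *	rcu_assign_pointer(gbl_foo, new_fp);
 *	spin_unlock(&foo_lock);
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */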

#endif /* __KERNEL__ */
#endif /* __LINUX_RCUPDATE_H */