/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0.  After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() must be called precisely once, so such code needs
 * some other mechanism to synchronize teardown.
 */
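/*
 * A minimal sketch of the two stage shutdown described above.  struct foo,
 * foo_release() and foo_destroy() are hypothetical names for illustration,
 * not part of this API:
 *
 *      struct foo {
 *              struct percpu_ref       ref;
 *              ...
 *      };
 *
 *      static void foo_release(struct percpu_ref *ref)
 *      {
 *              struct foo *foo = container_of(ref, struct foo, ref);
 *
 *              kfree(foo);             // last reference is gone
 *      }
 *
 *      // creation: takes the initial ref
 *      if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *
 *      static void foo_destroy(struct foo *foo)
 *      {
 *              // unpublish foo so lookups can't find it, then drop the
 *              // initial ref; foo_release() runs once every outstanding
 *              // get has been put.
 *              percpu_ref_kill(&foo->ref);
 *      }
 */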
#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
        __PERCPU_REF_ATOMIC     = 1LU << 0,     /* operating in atomic mode */
        __PERCPU_REF_DEAD       = 1LU << 1,     /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS  = 2,
};

/* @flags for percpu_ref_init() */
enum {
        /*
         * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
         * operation using percpu_ref_switch_to_percpu().  If initialized
         * with this flag, the ref will stay in atomic mode until
         * percpu_ref_switch_to_percpu() is invoked on it.
         */
        PERCPU_REF_INIT_ATOMIC  = 1 << 0,

        /*
         * Start dead w/ ref == 0 in atomic mode.  Must be revived with
         * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
         */
        PERCPU_REF_INIT_DEAD    = 1 << 1,
};
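/*
 * For example, a (hypothetical) caller that wants to avoid the percpu
 * machinery until an object is fully set up could do:
 *
 *      // start in atomic mode with ref == 1
 *      err = percpu_ref_init(&foo->ref, foo_release,
 *                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *      if (err)
 *              return err;
 *      // ... finish initialization ...
 *      percpu_ref_switch_to_percpu(&foo->ref); // now go percpu
 */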

struct percpu_ref {
        atomic_long_t           count;
        /*
         * The low bit of the pointer indicates whether the ref is in percpu
         * mode; if set, then get/put will manipulate the atomic_t.
         */
        unsigned long           percpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
        struct rcu_head         rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release, unsigned int flags,
                                 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
        percpu_ref_kill_and_confirm(ref, NULL);
}
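/*
 * When the caller must know the point at which percpu_ref_tryget_live()
 * can no longer hand out new references (see percpu_ref_tryget_live()
 * below), percpu_ref_kill_and_confirm() can be used directly.  A sketch;
 * foo_confirm_kill() and foo->confirm_done are hypothetical:
 *
 *      static void foo_confirm_kill(struct percpu_ref *ref)
 *      {
 *              struct foo *foo = container_of(ref, struct foo, ref);
 *
 *              complete(&foo->confirm_done);
 *      }
 *
 *      percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *      wait_for_completion(&foo->confirm_done);
 *      // from here on, percpu_ref_tryget_live() is guaranteed to fail
 */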

/*
 * Internal helper.  Don't use outside percpu-refcount proper.  The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                   unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /*
         * The value of @ref->percpu_count_ptr is tested for
         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
         * used as a pointer.  If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between contaminating the pointer value, meaning that
         * READ_ONCE() is required when fetching it.
         */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

        /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
        smp_read_barrier_depends();

        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
         * visible without ATOMIC if we race with percpu_ref_kill().  DEAD
         * implies ATOMIC anyway.  Test them together.
         */
        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_add(*percpu_count, nr);
        else
                atomic_long_add(nr, &ref->count);

        rcu_read_unlock_sched();
}
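/*
 * E.g. to take one reference per request in a (hypothetical) batch of
 * nr_requests submissions, rather than looping over percpu_ref_get():
 *
 *      percpu_ref_get_many(&foo->ref, nr_requests);
 *      // each completed request later does percpu_ref_put(&foo->ref)
 */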

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
        percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed.  Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail.  For such guarantee, percpu_ref_kill_and_confirm()
 * should be used.  After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret = false;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}
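/*
 * A typical (hypothetical) lookup-side user - foo_lookup() and foo_table
 * are illustrative only.  A dying object simply looks like a failed
 * lookup, whereas plain percpu_ref_tryget() would keep succeeding until
 * the count actually reached zero:
 *
 *      struct foo *foo_lookup(unsigned long id)
 *      {
 *              struct foo *foo;
 *
 *              rcu_read_lock();
 *              foo = radix_tree_lookup(&foo_table, id);
 *              if (foo && !percpu_ref_tryget_live(&foo->ref))
 *                      foo = NULL;     // being torn down
 *              rcu_read_unlock();
 *
 *              return foo;     // caller drops with percpu_ref_put()
 *      }
 */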

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_sub(*percpu_count, nr);
        else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
                ref->release(ref);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
        percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
        return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
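/*
 * E.g. a (hypothetical) writer that rejects new work once teardown has
 * started, holding whatever lock also serializes percpu_ref_kill() so the
 * dying state can't change underneath it:
 *
 *      spin_lock(&foo->lock);
 *      if (percpu_ref_is_dying(&foo->ref)) {
 *              spin_unlock(&foo->lock);
 *              return -ENODEV;
 *      }
 *      // ... queue the work ...
 *      spin_unlock(&foo->lock);
 */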

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;
        return !atomic_long_read(&ref->count);
}
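/*
 * One way this is used is to drain references synchronously: once the ref
 * is in atomic mode (e.g. after percpu_ref_kill()), a caller can sleep
 * until the count reaches zero.  A sketch; foo->wq is hypothetical and
 * foo_release() is assumed to do wake_up(&foo->wq):
 *
 *      percpu_ref_kill(&foo->ref);
 *      wait_event(foo->wq, percpu_ref_is_zero(&foo->ref));
 *      // all references are gone, teardown may proceed
 */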

#endif