/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);

# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * This barrier must provide two things:
 *
 *   - it must guarantee a STORE before the spin_lock() is ordered against a
 *     LOAD after it, see the comments at its two usage sites.
 *
 *   - it must ensure the critical section is RCsc.
 *
 * The latter is important for cases where we observe values written by other
 * CPUs in spin-loops, without barriers, while being subject to scheduling.
 *
 * CPU0			CPU1			CPU2
 *
 *			for (;;) {
 *			  if (READ_ONCE(X))
 *			    break;
 *			}
 * X=1
 *			<sched-out>
 *						<sched-in>
 *						r = X;
 *
 * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
 * we get migrated and CPU2 sees X==0.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
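
/*
 * Illustrative usage sketch (placeholder names, not taken from a real call
 * site): a caller that needs a STORE issued before taking the lock to be
 * ordered against a LOAD performed inside the critical section upgrades the
 * lock's ACQUIRE ordering to a full barrier:
 *
 *	WRITE_ONCE(obj->state, NEW_STATE);
 *	spin_lock(&obj->lock);
 *	smp_mb__after_spinlock();
 *	val = READ_ONCE(other->flag);
 *	spin_unlock(&obj->lock);
 *
 * Without smp_mb__after_spinlock(), the store to obj->state and the load of
 * other->flag are only separated by an ACQUIRE and may be reordered on
 * architectures where taking the lock does not already imply a full barrier.
 */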

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
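
/*
 * Illustrative sketch: a hypothetical helper (not part of this header's API)
 * showing the canonical pattern for data that can also be touched from
 * interrupt context. spin_lock_irqsave() disables local interrupts and
 * remembers their previous state, so the caller may run with IRQs either on
 * or off.
 */
static __always_inline void spinlock_example_irqsave_update(spinlock_t *lock,
							     unsigned int *counter)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* IRQs off, prior state saved in flags */
	(*counter)++;				/* critical section */
	spin_unlock_irqrestore(lock, flags);	/* prior IRQ state restored */
}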

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
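
/*
 * Illustrative sketch: a hypothetical helper (not part of this header's API)
 * showing the trylock-with-fallback shape, where the caller defers the work
 * instead of spinning when the lock is already held.
 */
static __always_inline bool spinlock_example_try_update(spinlock_t *lock,
							unsigned int *counter)
{
	unsigned long flags;

	if (!spin_trylock_irqsave(lock, flags))
		return false;			/* contended: caller retries or defers */

	(*counter)++;				/* critical section */
	spin_unlock_irqrestore(lock, flags);
	return true;
}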

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}
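
/*
 * Illustrative usage sketch (placeholder names, not from a real call site):
 *
 *	if (!spin_is_locked(&obj->lock))
 *		pr_warn("expected obj->lock to be held\n");
 *
 * Per the comment above, this is only a debugging hint: a nonzero return
 * means the lock was observed held by someone, not necessarily by this CPU,
 * and on CONFIG_SMP=n non-debug builds the check always reads 0.
 */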

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
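
/*
 * Illustrative usage sketch (placeholder names, not from a real call site):
 * the classic "free the object when the last reference drops" pattern.
 * The list lock is only taken when the count actually reaches zero, which
 * keeps the common put path lock-free:
 *
 *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */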

int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			   size_t max_size, unsigned int cpu_mult,
			   gfp_t gfp);

void free_bucket_spinlocks(spinlock_t *locks);

#endif /* __LINUX_SPINLOCK_H */