#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);
#define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
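
/*
 * Illustrative usage sketch (not part of the original header; the
 * "struct foo" names are made up): a helper that requires its caller
 * to already hold a lock can check that invariant up front:
 *
 *	static void foo_update(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		f->counter++;
 *	}
 *
 * The BUG_ON() fires if the lock is not locked at that point.
 */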
void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
							__acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock)		__acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)	__acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock)		__acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock)		__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
							__acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
							__acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)		__releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock)		__releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock)		__releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)	__releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock)	__releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
							__releases(lock);
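
/*
 * Calling-convention note with a usage sketch (illustrative only):
 * the irqsave variants above return the saved flags word by value
 * rather than filling in a pointer argument, so wrapper macros pair
 * them up along the lines of:
 *
 *	unsigned long flags;
 *
 *	flags = _spin_lock_irqsave(lock);
 *	...critical section with interrupts disabled...
 *	_spin_unlock_irqrestore(lock, flags);
 */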
/*
 * We inline the unlock functions in the nondebug case:
 */
#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
#define __always_inline__spin_unlock
#define __always_inline__read_unlock
#define __always_inline__write_unlock
#define __always_inline__spin_unlock_irq
#define __always_inline__read_unlock_irq
#define __always_inline__write_unlock_irq
#endif
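
/*
 * The defines above are empty marker macros: each #ifdef block below
 * only tests whether a given __always_inline__<op> marker has been
 * defined (here or elsewhere), and if so redirects the out-of-line
 * _<op>() entry point to the corresponding inline __<op>()
 * implementation further down in this file.
 */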
#ifndef CONFIG_DEBUG_SPINLOCK
#ifndef CONFIG_GENERIC_LOCKBREAK

#ifdef __always_inline__spin_lock
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef __always_inline__read_lock
#define _read_lock(lock) __read_lock(lock)
#endif

#ifdef __always_inline__write_lock
#define _write_lock(lock) __write_lock(lock)
#endif

#ifdef __always_inline__spin_lock_bh
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef __always_inline__read_lock_bh
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif

#ifdef __always_inline__write_lock_bh
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif

#ifdef __always_inline__spin_lock_irq
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef __always_inline__read_lock_irq
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif

#ifdef __always_inline__write_lock_irq
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif

#ifdef __always_inline__spin_lock_irqsave
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef __always_inline__read_lock_irqsave
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif

#ifdef __always_inline__write_lock_irqsave
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif

#endif /* !CONFIG_GENERIC_LOCKBREAK */

#ifdef __always_inline__spin_trylock
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef __always_inline__read_trylock
#define _read_trylock(lock) __read_trylock(lock)
#endif

#ifdef __always_inline__write_trylock
#define _write_trylock(lock) __write_trylock(lock)
#endif

#ifdef __always_inline__spin_trylock_bh
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef __always_inline__read_unlock
#define _read_unlock(lock) __read_unlock(lock)
#endif

#ifdef __always_inline__write_unlock
#define _write_unlock(lock) __write_unlock(lock)
#endif

#ifdef __always_inline__spin_unlock_bh
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef __always_inline__read_unlock_bh
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif

#ifdef __always_inline__write_unlock_bh
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock_irq
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef __always_inline__read_unlock_irq
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif

#ifdef __always_inline__write_unlock_irq
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif

#ifdef __always_inline__spin_unlock_irqrestore
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__read_unlock_irqrestore
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__write_unlock_irqrestore
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif

#endif /* CONFIG_DEBUG_SPINLOCK */
static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

static inline int __write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
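
/*
 * Caller pattern sketch (illustrative only; my_lock is a made-up
 * name): the trylock helpers never spin, returning 1 on success and
 * 0 on failure, so a typical caller does:
 *
 *	if (spin_trylock(&my_lock)) {
 *		...critical section...
 *		spin_unlock(&my_lock);
 *	} else {
 *		...fall back without blocking...
 *	}
 *
 * Note that the helpers above leave preemption disabled only when
 * the lock was actually taken.
 */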
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline void __read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
static inline void __spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
static inline void __spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
			     _raw_read_lock_flags, &flags);
	return flags;
}
static inline void __read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
static inline void __read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
			     _raw_write_lock_flags, &flags);
	return flags;
}
static inline void __write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
static inline void __write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
static inline void __spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
static inline void __write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
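
/*
 * The unlock helpers below tear down in the reverse order of acquire:
 * the lockdep release hook runs first (while the lock is still held),
 * then the raw unlock, and only afterwards are preemption, interrupts
 * or bottom halves re-enabled.
 */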
static inline void __spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
static inline void __write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
static inline void __read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
static inline void __spin_unlock_irqrestore(spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
static inline void __spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
static inline void __spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
static inline void __read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
static inline void __read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
static inline void __write_unlock_irqrestore(rwlock_t *lock,
					     unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
static inline void __write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
static inline void __write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
static inline int __spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
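
/*
 * Note the asymmetry in __spin_trylock_bh() above: bottom halves are
 * disabled before the attempt, re-enabled only on failure, and on
 * success stay disabled until the matching _spin_unlock_bh().
 */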
#endif /* __LINUX_SPINLOCK_API_SMP_H */