/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Chris Metcalf <cmetcalf@tilera.com>, 2011.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <kernel-features.h>
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE ((4 << 24) | 1)
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
#define FUTEX_WAIT_REQUEUE_PI 11
#define FUTEX_CMP_REQUEUE_PI 12
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256

#define FUTEX_BITSET_MATCH_ANY 0xffffffff

/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE 0
#define LLL_SHARED  FUTEX_PRIVATE_FLAG

#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
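
/* Worked example (illustrative only; assumes __ASSUME_PRIVATE_FUTEX and
   code outside libc.so/ld.so, so the XOR variant above is in effect):

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       == (0 | FUTEX_PRIVATE_FLAG) ^ 0                  == 128
     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       == (0 | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG == 0

   This is why the LLL_* values look backwards: LLL_PRIVATE is 0 and
   LLL_SHARED is FUTEX_PRIVATE_FLAG, and the XOR reverses the bit before
   it reaches the futex system call.  */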

#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                      __lll_private_flag (FUTEX_WAIT, private), \
                      (val), (timespec)); \
  })

#define lll_futex_timed_wait_bitset(futexp, val, timespec, clockbit, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_BITSET | clockbit; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), NULL /* Unused.  */, \
                              FUTEX_BITSET_MATCH_ANY); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                      __lll_private_flag (FUTEX_WAKE, private), \
                      (nr), 0); \
  })

#define lll_robust_dead(futexv, private) \
  do { \
    int *__futexp = &(futexv); \
    atomic_or (__futexp, FUTEX_OWNER_DIED); \
    lll_futex_wake (__futexp, 1, private); \
  } while (0)

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })
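
/* Usage sketch (illustrative only, not part of the original header): per
   the comments above, these wrappers return zero on success and non-zero
   on error, so a caller can fall back when FUTEX_CMP_REQUEUE fails, e.g.
   because the futex value changed.  All identifiers in the disabled block
   below are hypothetical.  */
#if 0
  int cv_futex, mut_futex, cv_val;	/* Hypothetical futex words/value.  */

  if (lll_futex_requeue (&cv_futex, 1, INT_MAX, &mut_futex, cv_val,
                         LLL_SHARED) != 0)
    /* Requeue failed (e.g. value mismatch); wake all waiters instead.  */
    lll_futex_wake (&cv_futex, INT_MAX, LLL_SHARED);
#endif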

/* Priority Inheritance support.  */
#define lll_futex_wait_requeue_pi(futexp, val, mutex, private) \
  lll_futex_timed_wait_requeue_pi (futexp, val, NULL, 0, mutex, private)

#define lll_futex_timed_wait_requeue_pi(futexp, val, timespec, clockbit, \
                                        mutex, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_REQUEUE_PI | clockbit; \
    __ret = INTERNAL_SYSCALL (futex, __err, 5, (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), mutex); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

#define lll_futex_cmp_requeue_pi(futexp, nr_wake, nr_move, mutex, val, priv) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE_PI, priv),\
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

static inline int __attribute__ ((always_inline))
__lll_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_trylock(lock) __lll_trylock (&(lock))

static inline int __attribute__ ((always_inline))
__lll_cond_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))

static inline int __attribute__ ((always_inline))
__lll_robust_trylock (int *futex, int id)
{
  return atomic_compare_and_exchange_val_acq (futex, id, 0) != 0;
}
#define lll_robust_trylock(lock, id) \
  __lll_robust_trylock (&(lock), id)

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

static inline void __attribute__ ((always_inline))
__lll_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    {
      if (__builtin_constant_p (private) && private == LLL_PRIVATE)
        __lll_lock_wait_private (futex);
      else
        __lll_lock_wait (futex, private);
    }
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)

static inline int __attribute__ ((always_inline))
__lll_robust_lock (int *futex, int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_lock_wait (futex, private);
  return result;
}
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)

static inline void __attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)

#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)

extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;

static inline int __attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)

static inline int __attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)

#define __lll_unlock(futex, private) \
  ((void) \
   ({ int *__futex = (futex); \
      int __oldval = atomic_exchange_rel (__futex, 0); \
      if (__builtin_expect (__oldval > 1, 0)) \
        lll_futex_wake (__futex, 1, private); \
   }))
#define lll_unlock(futex, private) __lll_unlock(&(futex), private)

#define __lll_robust_unlock(futex, private) \
  ((void) \
   ({ int *__futex = (futex); \
      int __oldval = atomic_exchange_rel (__futex, 0); \
      if (__builtin_expect (__oldval & FUTEX_WAITERS, 0)) \
        lll_futex_wake (__futex, 1, private); \
   }))
#define lll_robust_unlock(futex, private) \
  __lll_robust_unlock(&(futex), private)

#define lll_islocked(futex) \
  (futex != 0)

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER (0)
#define LLL_LOCK_INITIALIZER_LOCKED (1)
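
/* Usage sketch (illustrative only, not part of the original header): a lock
   word is 0 when unlocked, 1 when locked with no known waiters, and > 1
   when locked with possible waiters, which is why LLL_LOCK_INITIALIZER is 0
   and __lll_unlock above only issues a futex wake when the old value was
   greater than 1.  The variable below is hypothetical.  */
#if 0
  static int demo_lock = LLL_LOCK_INITIALIZER;

  lll_lock (demo_lock, LLL_PRIVATE);	/* CAS 0 -> 1, else futex-wait.  */
  /* ... critical section ...  */
  lll_unlock (demo_lock, LLL_PRIVATE);	/* Store 0, wake one waiter if any.  */
#endif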

/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)
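
/* Usage sketch (illustrative only, not part of the original header):
   join-style code blocks on the TID word that the kernel clears via
   CLONE_CHILD_CLEARTID, exactly as described in the comment above.  The
   identifier below is hypothetical.  */
#if 0
  extern int child_tid;		/* Holds the child's TID while it runs.  */

  /* Returns once the kernel has stored 0 to child_tid and issued the
     futex wakeup, i.e. once the child thread has exited.  */
  lll_wait_tid (child_tid);
#endif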
extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;
#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })
#endif	/* lowlevellock.h */