sysdeps/unix/sysv/linux/mips/lowlevellock.h
/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library.  If not, see
   <http://www.gnu.org/licenses/>.  */
#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <sysdep.h>
#include <kernel-features.h>
#define FUTEX_WAIT              0
#define FUTEX_WAKE              1
#define FUTEX_REQUEUE           3
#define FUTEX_CMP_REQUEUE       4
#define FUTEX_WAKE_OP           5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE   ((4 << 24) | 1)
#define FUTEX_LOCK_PI           6
#define FUTEX_UNLOCK_PI         7
#define FUTEX_TRYLOCK_PI        8
#define FUTEX_WAIT_BITSET       9
#define FUTEX_WAKE_BITSET       10
#define FUTEX_WAIT_REQUEUE_PI   11
#define FUTEX_CMP_REQUEUE_PI    12
#define FUTEX_PRIVATE_FLAG      128
#define FUTEX_CLOCK_REALTIME    256

#define FUTEX_BITSET_MATCH_ANY  0xffffffff
/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  (A worked example
   follows the __lll_private_flag definitions below.)  */
#define LLL_PRIVATE     0
#define LLL_SHARED      FUTEX_PRIVATE_FLAG
#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private)                                       \
   ? ((private) == 0                                                    \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))      \
      : (fl))                                                           \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG)                          \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
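/* Worked example (an added note, not part of the original file):
   taking the __ASSUME_PRIVATE_FUTEX branch outside libc.so/ld.so,
   the expansion is ((fl) | FUTEX_PRIVATE_FLAG) ^ (private), so:

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       = (0 | 128) ^ 0   = 128 = FUTEX_WAIT | FUTEX_PRIVATE_FLAG
     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       = (0 | 128) ^ 128 = 0   = FUTEX_WAIT

   LLL_SHARED carries FUTEX_PRIVATE_FLAG precisely so that the XOR
   clears the bit again; this is the "backwards" definition mentioned
   in the comment above.  */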
#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),         \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec));                       \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;           \
  })
#define lll_futex_timed_wait_bitset(futexp, val, timespec, clockbit, private) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
    int __op = FUTEX_WAIT_BITSET | clockbit;                            \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp),         \
                              __lll_private_flag (__op, private),       \
                              (val), (timespec), NULL /* Unused.  */,   \
                              FUTEX_BITSET_MATCH_ANY);                  \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;           \
  })
#define lll_futex_wake(futexp, nr, private) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (long) (futexp),         \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0);                                 \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;           \
  })
/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (long) (futexp),         \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private),\
                              (nr_wake), (nr_move), (mutex), (val));    \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                            \
  })
/* Returns non-zero if error happened, zero if success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
                                                                        \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2),         \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE);           \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                            \
  })
/* Priority Inheritance support.  */
#define lll_futex_wait_requeue_pi(futexp, val, mutex, private) \
  lll_futex_timed_wait_requeue_pi (futexp, val, NULL, 0, mutex, private)

#define lll_futex_timed_wait_requeue_pi(futexp, val, timespec, clockbit, \
                                        mutex, private)                 \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
    int __op = FUTEX_WAIT_REQUEUE_PI | clockbit;                        \
                                                                        \
    __ret = INTERNAL_SYSCALL (futex, __err, 5, (futexp),                \
                              __lll_private_flag (__op, private),       \
                              (val), (timespec), mutex);                \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret;           \
  })
#define lll_futex_cmp_requeue_pi(futexp, nr_wake, nr_move, mutex, val, priv) \
  ({                                                                    \
    INTERNAL_SYSCALL_DECL (__err);                                      \
    long int __ret;                                                     \
                                                                        \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp),                \
                              __lll_private_flag (FUTEX_CMP_REQUEUE_PI, priv),\
                              (nr_wake), (nr_move), (mutex), (val));    \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err);                            \
  })
static inline int __attribute__ ((always_inline))
__lll_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
}
#define lll_trylock(lock)       __lll_trylock (&(lock))
static inline int __attribute__ ((always_inline))
__lll_cond_trylock (int *futex)
{
  return atomic_compare_and_exchange_val_acq (futex, 2, 0) != 0;
}
#define lll_cond_trylock(lock)  __lll_cond_trylock (&(lock))
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
#define __lll_lock(futex, private)                                      \
  ((void) ({                                                            \
    int *__futex = (futex);                                             \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex,\
                                                                1, 0), 0)) \
      {                                                                 \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex);                            \
        else                                                            \
          __lll_lock_wait (__futex, private);                           \
      }                                                                 \
  }))
#define lll_lock(futex, private) __lll_lock (&(futex), private)
#define __lll_robust_lock(futex, id, private)                           \
  ({                                                                    \
    int *__futex = (futex);                                             \
    int __val = 0;                                                      \
                                                                        \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private);                \
    __val;                                                              \
  })
#define lll_robust_lock(futex, id, private) \
  __lll_robust_lock (&(futex), id, private)
static inline void __attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
{
  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
    __lll_lock_wait (futex, private);
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
#define lll_robust_cond_lock(futex, id, private) \
  __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
                                 int private) attribute_hidden;
extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
                                        int private) attribute_hidden;
static inline int __attribute__ ((always_inline))
__lll_timedlock (int *futex, const struct timespec *abstime, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
    result = __lll_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_timedlock(futex, abstime, private) \
  __lll_timedlock (&(futex), abstime, private)
static inline int __attribute__ ((always_inline))
__lll_robust_timedlock (int *futex, const struct timespec *abstime,
                        int id, int private)
{
  int result = 0;
  if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
    result = __lll_robust_timedlock_wait (futex, abstime, private);
  return result;
}
#define lll_robust_timedlock(futex, abstime, id, private) \
  __lll_robust_timedlock (&(futex), abstime, id, private)
#define __lll_unlock(futex, private)                                    \
  ((void) ({                                                            \
    int *__futex = (futex);                                             \
    int __val = atomic_exchange_rel (__futex, 0);                       \
                                                                        \
    if (__builtin_expect (__val > 1, 0))                                \
      lll_futex_wake (__futex, 1, private);                             \
  }))
#define lll_unlock(futex, private) __lll_unlock (&(futex), private)
#define __lll_robust_unlock(futex, private)                             \
  ((void) ({                                                            \
    int *__futex = (futex);                                             \
    int __val = atomic_exchange_rel (__futex, 0);                       \
                                                                        \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0))                    \
      lll_futex_wake (__futex, 1, private);                             \
  }))
#define lll_robust_unlock(futex, private) \
  __lll_robust_unlock (&(futex), private)
#define lll_islocked(futex) \
  (futex != 0)
/* Our internal lock implementation is identical to the binary-compatible
   mutex implementation.  */

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER            (0)
#define LLL_LOCK_INITIALIZER_LOCKED     (1)
/* The states of a lock are:
    0  -  untaken
    1  -  taken by one user
   >1  -  taken by more users  */
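/* Illustrative sketch (an added note, not part of the original file):
   a typical sequence under the state encoding above.

     int lock = LLL_LOCK_INITIALIZER;   -- state 0: untaken
     lll_lock (lock, LLL_PRIVATE);      -- CAS 0 -> 1 succeeds: state 1
        (a second locker fails the CAS; __lll_lock_wait then sets the
        value to 2, i.e. ">1: taken by more users", before sleeping
        in FUTEX_WAIT)
     lll_unlock (lock, LLL_PRIVATE);    -- exchanges the value back to 0;
        an old value > 1 means waiters may exist, so one is woken
        via lll_futex_wake.  */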
/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via futex
   wakeup when the clone terminates.  The memory location contains the
   thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do {                                                  \
    __typeof (tid) __tid;                               \
    while ((__tid = (tid)) != 0)                        \
      lll_futex_wait (&(tid), __tid, LLL_SHARED);       \
  } while (0)
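/* Illustrative sketch (an added note, not part of the original file):
   this is the mechanism behind joining a thread.  With the child
   created roughly as

     clone (start_fn, child_stack, CLONE_CHILD_CLEARTID | ..., arg,
            ..., &pd->tid);

   pd->tid holds the child's TID while it runs; on termination the
   kernel stores 0 there and issues a FUTEX_WAKE on that address, so
   lll_wait_tid (pd->tid) keeps re-reading the location and sleeping
   in FUTEX_WAIT until it observes zero.  */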
extern int __lll_timedwait_tid (int *, const struct timespec *)
     attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({                                                    \
    int __res = 0;                                      \
    if ((tid) != 0)                                     \
      __res = __lll_timedwait_tid (&(tid), (abstime));  \
    __res;                                              \
  })
/* Implement __libc_lock_lock using exchange_and_add, which expands into
   a single instruction on XLP processors.  We enable this for all MIPS
   processors, as atomic_exchange_and_add_acq and
   atomic_compare_and_exchange_acq take the same time to execute.
   This is a simplified expansion of ({ lll_lock (NAME, LLL_PRIVATE); 0; }).

   Note: __lll_lock_wait_private() resets the lock value to '2', which
   prevents unbounded increase of the lock value and [with billions of
   threads] overflow.  */
#define __libc_lock_lock(NAME)                                          \
  ({                                                                    \
    int *__futex = &(NAME);                                             \
    if (__builtin_expect (atomic_exchange_and_add_acq (__futex, 1), 0)) \
      __lll_lock_wait_private (__futex);                                \
    0;                                                                  \
  })
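/* Worked trace (an added note, not part of the original file):

     value 0, __libc_lock_lock  ->  fetch-and-add returns 0: lock
                                    acquired, value is now 1
     value 1, __libc_lock_lock  ->  fetch-and-add returns 1 (non-zero):
                                    caller enters __lll_lock_wait_private,
                                    which resets the value to 2 and
                                    sleeps in FUTEX_WAIT

   The reset to 2 is what keeps repeated contended lock attempts from
   incrementing the value without bound.  */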
#ifdef _MIPS_ARCH_XLP
/* The generic version using a single atomic_compare_and_exchange_acq takes
   less time for non-XLP processors, so we use the version below for XLP
   only.  */
# define __libc_lock_trylock(NAME)                                      \
  ({                                                                    \
    int *__futex = &(NAME);                                             \
    int __result = atomic_exchange_and_add_acq (__futex, 1);            \
    /* If __result == 0, we succeeded in acquiring the lock.            \
       If __result == 1, we switched the lock to 'contended' state,     \
       which will cause a [possibly unnecessary] call to                \
       lll_futex_wait.  This is unlikely, so we accept the possible     \
       inefficiency.                                                    \
       If __result >= 2, we need to set the lock to 'contended' state   \
       to avoid unbounded increase from subsequent trylocks.  */        \
    if (__result >= 2)                                                  \
      __result = atomic_exchange_acq (__futex, 2);                      \
    __result;                                                           \
  })
#endif
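/* Usage sketch (an added note, not part of the original file):
   __libc_lock_trylock follows the usual convention of returning zero
   on success and non-zero when the lock is already held, e.g. with a
   hypothetical lock variable some_lock:

     if (__libc_lock_trylock (some_lock) == 0)
       {
         ... critical section ...
         __libc_lock_unlock (some_lock);
       }

   In the __result == 1 case above, the failed trylock has moved an
   uncontended lock into the 'contended' state, so the holder's unlock
   may issue one spurious FUTEX_WAKE; correctness is unaffected.  */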
#endif  /* lowlevellock.h */