/* Copyright (C) 2003-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _LOWLEVELLOCK_H
#define _LOWLEVELLOCK_H 1

#include <time.h>
#include <sys/param.h>
#include <bits/pthreadtypes.h>
#include <atomic.h>
#include <kernel-features.h>

#ifndef __NR_futex
# define __NR_futex             221
#endif
#define FUTEX_WAIT              0
#define FUTEX_WAKE              1
#define FUTEX_REQUEUE           3
#define FUTEX_CMP_REQUEUE       4
#define FUTEX_WAKE_OP           5
#define FUTEX_OP_CLEAR_WAKE_IF_GT_ONE   ((4 << 24) | 1)
#define FUTEX_LOCK_PI           6
#define FUTEX_UNLOCK_PI         7
#define FUTEX_TRYLOCK_PI        8
#define FUTEX_WAIT_BITSET       9
#define FUTEX_WAKE_BITSET       10
#define FUTEX_WAIT_REQUEUE_PI   11
#define FUTEX_CMP_REQUEUE_PI    12
#define FUTEX_PRIVATE_FLAG      128
#define FUTEX_CLOCK_REALTIME    256

#define FUTEX_BITSET_MATCH_ANY  0xffffffff

/* Values for 'private' parameter of locking macros.  Yes, the
   definition seems to be backwards.  But it is not.  The bit will be
   reversed before passing to the system call.  */
#define LLL_PRIVATE     0
#define LLL_SHARED      FUTEX_PRIVATE_FLAG

#if !defined NOT_IN_libc || defined IS_IN_rtld
/* In libc.so or ld.so all futexes are private.  */
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  ((fl) | FUTEX_PRIVATE_FLAG)
# else
#  define __lll_private_flag(fl, private) \
  ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex))
# endif
#else
# ifdef __ASSUME_PRIVATE_FUTEX
#  define __lll_private_flag(fl, private) \
  (((fl) | FUTEX_PRIVATE_FLAG) ^ (private))
# else
#  define __lll_private_flag(fl, private) \
  (__builtin_constant_p (private) \
   ? ((private) == 0 \
      ? ((fl) | THREAD_GETMEM (THREAD_SELF, header.private_futex)) \
      : (fl)) \
   : ((fl) | (((private) ^ FUTEX_PRIVATE_FLAG) \
              & THREAD_GETMEM (THREAD_SELF, header.private_futex))))
# endif
#endif
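
/* For illustration: outside libc.so/ld.so, with __ASSUME_PRIVATE_FUTEX
   defined, the macro above reverses the 'private' bit before it reaches
   the kernel:

     __lll_private_flag (FUTEX_WAIT, LLL_PRIVATE)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ 0
       == FUTEX_WAIT | FUTEX_PRIVATE_FLAG      (process-private futex)

     __lll_private_flag (FUTEX_WAIT, LLL_SHARED)
       == (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) ^ FUTEX_PRIVATE_FLAG
       == FUTEX_WAIT                           (process-shared futex)

   This is why LLL_PRIVATE is 0 and LLL_SHARED is FUTEX_PRIVATE_FLAG even
   though that looks backwards at first sight.  */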

#define lll_futex_wait(futexp, val, private) \
  lll_futex_timed_wait (futexp, val, NULL, private)

#define lll_futex_timed_wait(futexp, val, timespec, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                              __lll_private_flag (FUTEX_WAIT, private), \
                              (val), (timespec)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_timed_wait_bitset(futexp, val, timespec, clockbit, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_BITSET | clockbit; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), NULL /* Unused.  */, \
                              FUTEX_BITSET_MATCH_ANY); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_wake(futexp, nr, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 4, (futexp), \
                              __lll_private_flag (FUTEX_WAKE, private), \
                              (nr), 0); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_robust_dead(futexv, private) \
  do \
    { \
      INTERNAL_SYSCALL_DECL (__err); \
      int *__futexp = &(futexv); \
      atomic_or (__futexp, FUTEX_OWNER_DIED); \
      INTERNAL_SYSCALL (futex, __err, 4, __futexp, \
                        __lll_private_flag (FUTEX_WAKE, private), 1, 0); \
    } \
  while (0)

/* Returns non-zero if an error happened, zero on success.  */
#define lll_futex_requeue(futexp, nr_wake, nr_move, mutex, val, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE, private), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Returns non-zero if an error happened, zero on success.  */
#define lll_futex_wake_unlock(futexp, nr_wake, nr_wake2, futexp2, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_WAKE_OP, private), \
                              (nr_wake), (nr_wake2), (futexp2), \
                              FUTEX_OP_CLEAR_WAKE_IF_GT_ONE); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

/* Priority Inheritance support.  */
#define lll_futex_wait_requeue_pi(futexp, val, mutex, private) \
  lll_futex_timed_wait_requeue_pi (futexp, val, NULL, 0, mutex, private)

#define lll_futex_timed_wait_requeue_pi(futexp, val, timespec, clockbit, \
                                        mutex, private) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    int __op = FUTEX_WAIT_REQUEUE_PI | clockbit; \
    __ret = INTERNAL_SYSCALL (futex, __err, 5, (futexp), \
                              __lll_private_flag (__op, private), \
                              (val), (timespec), mutex); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err) ? -__ret : __ret; \
  })

#define lll_futex_cmp_requeue_pi(futexp, nr_wake, nr_move, mutex, val, priv) \
  ({ \
    INTERNAL_SYSCALL_DECL (__err); \
    long int __ret; \
    __ret = INTERNAL_SYSCALL (futex, __err, 6, (futexp), \
                              __lll_private_flag (FUTEX_CMP_REQUEUE_PI, priv), \
                              (nr_wake), (nr_move), (mutex), (val)); \
    INTERNAL_SYSCALL_ERROR_P (__ret, __err); \
  })

#ifdef UP
# define __lll_acq_instr        ""
# define __lll_rel_instr        ""
#else
# define __lll_acq_instr        "isync"
# ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the "light weight" sync (lwsync),
 * so if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
#  define __lll_rel_instr       "lwsync"
# else
/*
 * Older powerpc32 processors don't support the "light weight" sync
 * (lwsync), so the only safe option is to use normal sync for all
 * powerpc32 applications.
 */
#  define __lll_rel_instr       "sync"
# endif
#endif
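
/* For illustration only (a sketch, not code used by this header): on
   powerpc a releasing store is ordered by placing __lll_rel_instr before
   the store, and an acquiring load/CAS by placing __lll_acq_instr after it
   (as in __lll_robust_trylock below).  A hypothetical unlocking store
   could look like

     __asm __volatile (__lll_rel_instr "\n\t"
                       "stw %1,%0"
                       : "=m" (*futex) : "r" (0) : "memory");

   lwsync provides this release ordering on POWER4 and newer; older 32-bit
   processors need the heavier sync.  */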

/* Set *futex to ID if it is 0, atomically.  Returns the old value.  */
#define __lll_robust_trylock(futex, id) \
  ({ int __val; \
     __asm __volatile ("1:  lwarx  %0,0,%2" MUTEX_HINT_ACQ "\n" \
                       "    cmpwi  0,%0,0\n" \
                       "    bne    2f\n" \
                       "    stwcx. %3,0,%2\n" \
                       "    bne-   1b\n" \
                       "2:  " __lll_acq_instr \
                       : "=&r" (__val), "=m" (*futex) \
                       : "r" (futex), "r" (id), "m" (*futex) \
                       : "cr0", "memory"); \
     __val; \
  })

#define lll_robust_trylock(lock, id) __lll_robust_trylock (&(lock), id)

/* Set *futex to 1 if it is 0, atomically.  Returns the old value.  */
#define __lll_trylock(futex) __lll_robust_trylock (futex, 1)

#define lll_trylock(lock) __lll_trylock (&(lock))

/* Set *futex to 2 if it is 0, atomically.  Returns the old value.  */
#define __lll_cond_trylock(futex) __lll_robust_trylock (futex, 2)

#define lll_cond_trylock(lock) __lll_cond_trylock (&(lock))
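
/* For illustration: the lwarx/stwcx. loop in __lll_robust_trylock is an
   atomic compare-and-swap from 0 to ID with acquire semantics, roughly
   equivalent to this sketch (hypothetical helper, not used by the header):

     static inline int
     __illustrative_trylock (int *futex, int id)
     {
       int old = *futex;        // lwarx: load with reservation
       if (old == 0)
         *futex = id;           // stwcx.: store only if the reservation
                                // still holds, otherwise retry from lwarx
       return old;              // isync then orders later accesses
     }

   Hence lll_trylock returns 0 when the lock was acquired and the previous
   value (1 or 2) when it was already held.  */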

extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;

#define lll_lock(lock, private) \
  (void) ({ \
    int *__futex = &(lock); \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0), \
                          0) != 0) \
      { \
        if (__builtin_constant_p (private) && (private) == LLL_PRIVATE) \
          __lll_lock_wait_private (__futex); \
        else \
          __lll_lock_wait (__futex, private); \
      } \
  })

#define lll_robust_lock(lock, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })

#define lll_cond_lock(lock, private) \
  (void) ({ \
    int *__futex = &(lock); \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 2, 0), \
                          0) != 0) \
      __lll_lock_wait (__futex, private); \
  })

#define lll_robust_cond_lock(lock, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    int __id = id | FUTEX_WAITERS; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, __id, \
                                                                0), 0)) \
      __val = __lll_robust_lock_wait (__futex, private); \
    __val; \
  })

extern int __lll_timedlock_wait
  (int *futex, const struct timespec *, int private) attribute_hidden;
extern int __lll_robust_timedlock_wait
  (int *futex, const struct timespec *, int private) attribute_hidden;

#define lll_timedlock(lock, abstime, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_val_acq (__futex, 1, 0), \
                          0) != 0) \
      __val = __lll_timedlock_wait (__futex, abstime, private); \
    __val; \
  })

#define lll_robust_timedlock(lock, abstime, id, private) \
  ({ \
    int *__futex = &(lock); \
    int __val = 0; \
    if (__builtin_expect (atomic_compare_and_exchange_bool_acq (__futex, id, \
                                                                0), 0)) \
      __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
    __val; \
  })

#define lll_unlock(lock, private) \
  ((void) ({ \
    int *__futex = &(lock); \
    int __val = atomic_exchange_rel (__futex, 0); \
    if (__builtin_expect (__val > 1, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))

#define lll_robust_unlock(lock, private) \
  ((void) ({ \
    int *__futex = &(lock); \
    int __val = atomic_exchange_rel (__futex, 0); \
    if (__builtin_expect (__val & FUTEX_WAITERS, 0)) \
      lll_futex_wake (__futex, 1, private); \
  }))

#define lll_islocked(futex) \
  (futex != 0)

/* Initializers for lock.  */
#define LLL_LOCK_INITIALIZER            (0)
#define LLL_LOCK_INITIALIZER_LOCKED     (1)

/* The states of a lock are:
    0  -  untaken
    1  -  taken by one user
   >1  -  taken by more users */
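
/* For illustration: a typical contended sequence using the macros above
   (a sketch; __lll_lock_wait lives in the corresponding lowlevellock.c):

     Thread A: lll_lock (l, LLL_PRIVATE)    fast path, CAS 0 -> 1, no syscall.
     Thread B: lll_lock (l, LLL_PRIVATE)    CAS fails; __lll_lock_wait marks
                                            the lock contended (value 2) and
                                            sleeps in FUTEX_WAIT.
     Thread A: lll_unlock (l, LLL_PRIVATE)  exchanges in 0, sees the old
                                            value 2 (> 1), so it issues
                                            FUTEX_WAKE for one waiter.  */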

/* The kernel notifies a process which uses CLONE_CHILD_CLEARTID via a
   futex wakeup when the clone terminates.  The memory location contains
   the thread ID while the clone is running and is reset to zero
   afterwards.  */
#define lll_wait_tid(tid) \
  do { \
    __typeof (tid) __tid; \
    while ((__tid = (tid)) != 0) \
      lll_futex_wait (&(tid), __tid, LLL_SHARED); \
  } while (0)

extern int __lll_timedwait_tid (int *, const struct timespec *)
  attribute_hidden;

#define lll_timedwait_tid(tid, abstime) \
  ({ \
    int __res = 0; \
    if ((tid) != 0) \
      __res = __lll_timedwait_tid (&(tid), (abstime)); \
    __res; \
  })

#endif  /* lowlevellock.h */