nptl/pthread_mutex_lock.c (glibc)
/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <stap-probe.h>
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private) ({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
                      PTHREAD_MUTEX_PSHARED (mutex))
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
#ifdef HAVE_ELISION
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow to set the elision flags.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
# ifdef NO_INCR
        newval |= FUTEX_WAITERS;
# endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
            --mutex->__data.__nusers;
# endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif

#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
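
/* What an application sees of the implementation above is its set of
   return values: EDEADLK from the error-checking paths, EAGAIN when a
   recursion counter would overflow, and EOWNERDEAD/ENOTRECOVERABLE from
   the robust-mutex paths.  The sketch below is NOT part of glibc; it is
   a minimal, hypothetical caller-side example using only standard
   pthread calls, showing how those return values are typically handled.
   The names `m`, `attr` and `lock_and_use` are invented for the example.  */

#if 0  /* Illustrative example only, not compiled with the library.  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static int
lock_and_use (void)
{
  int err = pthread_mutex_lock (&m);

  if (err == EOWNERDEAD)
    {
      /* We own the lock, but the previous owner died while holding it.
         After repairing the protected state, mark the mutex consistent;
         otherwise it becomes ENOTRECOVERABLE once unlocked.  */
      pthread_mutex_consistent (&m);
      err = 0;
    }
  else if (err == EDEADLK)
    {
      /* Error-checking mutex: this thread already holds the lock.  */
      fprintf (stderr, "relock attempt detected\n");
      return err;
    }
  else if (err != 0)
    return err;          /* e.g. EAGAIN or ENOTRECOVERABLE.  */

  /* ... use the protected data ...  */
  pthread_mutex_unlock (&m);
  return 0;
}

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_ERRORCHECK);
  pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &attr);
  pthread_mutexattr_destroy (&attr);

  return lock_and_use ();
}
#endif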