/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}
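/* The LLL_* macros below are the only low-level lock operations the rest
   of this file uses.  As noted above, pthread_mutex_cond_lock.c provides
   its own definitions (and defines NO_INCR) before including this file,
   to build the variant used when re-acquiring a mutex after a condition
   variable wait.  */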
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
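/* Dispatch note: ___pthread_mutex_lock below handles only the common
   kinds (normal, recursive, adaptive, error-checking, plus elision).
   Any type bits outside PTHREAD_MUTEX_KIND_MASK_NP and
   PTHREAD_MUTEX_ELISION_FLAGS_NP (robust, priority-inheritance,
   priority-protected) divert to the slow path in
   __pthread_mutex_lock_full.  */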
int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__ ((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow to set the elision flags.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
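/* The adaptive branch above spins for at most
   MIN (max_adaptive_count (), 2 * __spins + 10) iterations before
   blocking, and then nudges the stored __spins value by one eighth of
   the difference to the spin count just observed; __spins is thus an
   exponentially weighted moving average of recent acquisition costs.  */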
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

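/* Slow path for the remaining mutex kinds.  For the robust variants the
   code below follows the robust-list protocol: robust_head.list_op_pending
   is pointed at the mutex before any locking step and cleared only after
   ENQUEUE_MUTEX, so that the kernel (and recovery code) can find a mutex
   whose acquisition or release was interrupted by the death of its
   owner.  */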
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
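          /* EOWNERDEAD tells the caller that it now owns the mutex, but
             that the state it protects may be inconsistent; per POSIX the
             caller is expected to repair the state and call
             pthread_mutex_consistent before unlocking, otherwise the mutex
             becomes permanently unrecoverable.  */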
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval))
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }
      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
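        /* Past this point the lock word contains our TID: either the CAS
           above installed it, or the futex_lock_pi syscall acquired the
           lock on our behalf.  The kernel may additionally have set
           FUTEX_OWNER_DIED if the previous owner exited while holding the
           lock.  */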
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
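    /* Priority-protected (POSIX priority ceiling) mutexes.  The lock word
       encodes the current ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK and
       the lock state in the low bits: ceilval means unlocked, ceilval | 1
       locked, and ceilval | 2 locked with potential waiters, which is the
       value the futex_wait below sleeps on.  */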
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}

#if PTHREAD_MUTEX_VERSIONS
versioned_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
                  GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */

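/* Only compiled as part of the pthread_mutex_cond_lock.c variant (which
   defines NO_INCR): after a condition-variable wait on a PI mutex the
   kernel has already re-acquired the lock for us, and this hook merely
   fixes up the ownership bookkeeping.  */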
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif /* NO_INCR */