/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <futex-internal.h>
#include <stap-probe.h>
#include <shlib-compat.h>
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
/* lll_lock with single-thread optimization.  */
static inline void
lll_mutex_lock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  If the lock is already marked as
     acquired, POSIX requires that pthread_mutex_lock deadlocks for
     normal mutexes, so skip the optimization in that case as
     well.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0)
    mutex->__data.__lock = 1;
  else
    lll_lock (mutex->__data.__lock, private);
}
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock
# define PTHREAD_MUTEX_VERSIONS 1
#endif
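/* When pthread_mutex_cond_lock.c includes this file it supplies its own
   versions of the macros above and defines NO_INCR, producing the
   __pthread_mutex_cond_lock variant used by the condition variable code.
   The NO_INCR conditionals further down exist for that build.  */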
#ifndef LLL_MUTEX_READ_LOCK
# define LLL_MUTEX_READ_LOCK(mutex) \
  atomic_load_relaxed (&(mutex)->__data.__lock)
#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
int
PTHREAD_MUTEX_LOCK (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);
      assert (mutex->__data.__owner == 0);
    }
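  /* On targets with lock elision support, FORCE_ELISION above may tag a
     default mutex as elided and jump to the elision path below, which
     attempts a transactional acquisition and falls back to the regular
     lock on abort.  */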
#if ENABLE_ELISION_SUPPORT
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow to set the elision flags.  */
      /* Don't record owner or users for elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK_OPTIMIZED (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
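  /* Adaptive mutex: spin for a bounded, self-tuning number of iterations
     (tracked in __data.__spins as a moving average of past spin counts)
     before falling back to the futex-based wait.  */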
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          int spin_count, exp_backoff = 1;
          unsigned int jitter = get_jitter ();
          do
            {
              /* In each loop, spin count is exponential backoff plus
                 random jitter, random range is [0, exp_backoff-1].  */
              spin_count = exp_backoff + (jitter & (exp_backoff - 1));
              cnt += spin_count;
              if (cnt >= max_cnt)
                {
                  /* If cnt exceeds max spin count, just go to wait
                     queue.  */
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              do
                atomic_spin_nop ();
              while (--spin_count > 0);
              /* Prepare for next loop.  */
              exp_backoff = get_next_backoff (exp_backoff);
            }
          while (LLL_MUTEX_READ_LOCK (mutex) != 0
                 || LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
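  /* Error-checking mutex: the only remaining fast-path type; relocking by
     the owning thread fails with EDEADLK instead of deadlocking.  */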
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
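/* Slow path for the less common mutex types: robust, priority-inheritance
   (PI), and priority-protection (PP) mutexes.  */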
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if we
         are building the special case mutexes for use from within condition
         variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       id | assume_other_futex_waiters,
                                                       0);
              if (__glibc_likely (oldval == 0))
                break;
            }
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }
              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          futex_wait ((unsigned int *) &mutex->__data.__lock, oldval,
                      PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
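    /* Priority-inheritance mutexes: contended acquisition is delegated to
       the kernel (FUTEX_LOCK_PI), which records the owner TID in the futex
       word and boosts the owner's priority to that of the highest-priority
       waiter.  */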
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, 0 /* unused */,
                                       NULL, private);
            if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  __futex_abstimed_wait64 (&(unsigned int){0}, 0,
                                           0 /* ignored */, NULL, private);
              }
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_fetch_and_acquire (&mutex->__data.__lock,
                                      ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
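    /* Priority-protection (priority ceiling) mutexes: the ceiling is kept in
       the high bits of __lock (PTHREAD_MUTEX_PRIO_CEILING_MASK), while the
       low bits track the lock state: ceilval means unlocked, ceilval | 1
       locked, and ceilval | 2 locked with waiters.  */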
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  futex_wait ((unsigned int *) &mutex->__data.__lock,
                              ceilval | 2,
                              PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#if PTHREAD_MUTEX_VERSIONS
libc_hidden_ver (___pthread_mutex_lock, __pthread_mutex_lock)
# ifndef SHARED
strong_alias (___pthread_mutex_lock, __pthread_mutex_lock)
# endif
versioned_symbol (libpthread, ___pthread_mutex_lock, pthread_mutex_lock,
                  GLIBC_2_0);

# if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_lock, __pthread_mutex_lock,
               GLIBC_2_0);
# endif
#endif /* PTHREAD_MUTEX_VERSIONS */
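/* Only built in the NO_INCR (pthread_mutex_cond_lock.c) variant.  The futex
   word of this PI mutex already names the calling thread as owner (the
   kernel acquired it on our behalf), so only the user-space ownership
   bookkeeping is updated here for the condition variable code.  */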
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  /* See concurrency notes regarding __kind in struct __pthread_mutex_s
     in sysdeps/nptl/bits/thread-shared-types.h.  */
  int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
  assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif