/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>
static int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }
      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
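      /* Worked example of the adaptive update above (a sketch with
         illustrative numbers): if the stored estimate __spins is 40 and
         this acquisition spun cnt == 80 times, the estimate moves one
         eighth of the difference, (80 - 40) / 8 == 5, giving 45.  The next
         contended attempt then spins at most MIN (max_adaptive_count (),
         45 * 2 + 10) == MIN (max_adaptive_count (), 100) iterations before
         falling back to blocking on the futex.  */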
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }
          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;
          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }
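      /* Minimal sketch (disabled; not part of the build) of the waiters
         protocol the loop above implements; futex_word, tid and private
         are stand-ins for the real fields and arguments.  A value of 0
         means unlocked, TID means locked with no known waiters, and
         TID | FUTEX_WAITERS obliges the unlocking thread to futex-wake.  */
#if 0
      while (1)
        {
          unsigned int old
            = atomic_compare_and_exchange_val_acq (futex_word, tid, 0);
          if (old == 0)
            break;                     /* Uncontended: we own the lock.  */
          /* Publish that a waiter may block; sleep only if the flag is
             already set or we set it ourselves (the bool CAS returns
             nonzero on failure).  Reload and retry in all cases.  */
          if ((old & FUTEX_WAITERS) != 0
              || !atomic_compare_and_exchange_bool_acq (futex_word,
                                                        old | FUTEX_WAITERS,
                                                        old))
            futex_wait (futex_word, old | FUTEX_WAITERS, private);
        }
#endif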
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }
      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
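      /* Caller-side sketch (disabled): how the EOWNERDEAD result produced
         by the robust paths above is meant to be consumed.  m, deadline
         and repair_shared_state are hypothetical.  */
#if 0
      int r = pthread_mutex_timedlock (&m, &deadline);
      if (r == EOWNERDEAD)
        {
          repair_shared_state ();       /* Hypothetical recovery step.  */
          /* Without this call, the next unlock marks the mutex
             PTHREAD_MUTEX_NOTRECOVERABLE.  */
          pthread_mutex_consistent (&m);
          r = 0;
        }
#endif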
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        /* Currently futex FUTEX_LOCK_PI operation only provides support for
           CLOCK_REALTIME and trying to emulate by converting a
           CLOCK_MONOTONIC to CLOCK_REALTIME will take in account possible
           changes to the wall clock.  */
        if (__glibc_unlikely (clockid != CLOCK_REALTIME))
          return EINVAL;

        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, abstime, private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);
                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
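      /* Initialization sketch (disabled): the PI cases above are reached
         only for mutexes created with the PTHREAD_PRIO_INHERIT protocol;
         m and attr are hypothetical.  Note that for these mutexes
         pthread_mutex_clocklock accepts only CLOCK_REALTIME.  */
#if 0
      pthread_mutexattr_t attr;
      pthread_mutexattr_init (&attr);
      pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_INHERIT);
      pthread_mutex_init (&m, &attr);
#endif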
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
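      /* Initialization sketch (disabled): the priority-protection cases
         above are reached for mutexes created with PTHREAD_PRIO_PROTECT
         and a priority ceiling; m, attr and the ceiling value 10 are
         hypothetical.  */
#if 0
      pthread_mutexattr_t attr;
      pthread_mutexattr_init (&attr);
      pthread_mutexattr_setprotocol (&attr, PTHREAD_PRIO_PROTECT);
      pthread_mutexattr_setprioceiling (&attr, 10);
      pthread_mutex_init (&m, &attr);
#endif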
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
int
___pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                              clockid_t clockid,
                              const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}
#if __TIMESIZE == 64
strong_alias (___pthread_mutex_clocklock64, ___pthread_mutex_clocklock)
#else /* __TIMESPEC64 != 64 */
strong_alias (___pthread_mutex_clocklock64, __pthread_mutex_clocklock64)
libc_hidden_def (__pthread_mutex_clocklock64)
int
___pthread_mutex_clocklock (pthread_mutex_t *mutex,
                            clockid_t clockid,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return ___pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
#endif /* __TIMESPEC64 != 64 */
libc_hidden_ver (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#ifndef SHARED
strong_alias (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#endif
versioned_symbol (libc, ___pthread_mutex_clocklock,
                  pthread_mutex_clocklock, GLIBC_2_34);
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_30, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_clocklock,
               pthread_mutex_clocklock, GLIBC_2_30);
#endif
int
___pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                              const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}
#if __TIMESIZE == 64
strong_alias (___pthread_mutex_timedlock64, ___pthread_mutex_timedlock)
#else /* __TIMESPEC64 != 64 */
strong_alias (___pthread_mutex_timedlock64, __pthread_mutex_timedlock64);
libc_hidden_def (__pthread_mutex_timedlock64)
int
___pthread_mutex_timedlock (pthread_mutex_t *mutex,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
#endif /* __TIMESPEC64 != 64 */
versioned_symbol (libc, ___pthread_mutex_timedlock,
                  pthread_mutex_timedlock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#ifndef SHARED
strong_alias (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_timedlock,
               pthread_mutex_timedlock, GLIBC_2_2);
#endif