/* Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>

/* Fallbacks for architectures without lock elision support.  */
#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  __futex_clocklock64 (&(futex), clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a, t) lll_trylock (a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
static int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
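      /* The __builtin_expect above biases code generation toward
         PTHREAD_MUTEX_TIMED_NP, the default and by far the most common
         mutex type.  */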
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }
      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
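      /* Adaptive mutexes briefly spin in user space before falling back
         to a futex wait, which pays off when the owner typically holds
         the lock only for a short time.  */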
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          /* Adapt: move the stored spin count an eighth of the way toward
             the number of iterations this acquisition needed.  */
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
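      /* Robust mutexes: each acquired mutex is linked into the owner's
         robust list so that, if the owner dies, the kernel sets
         FUTEX_OWNER_DIED and the next locker can recover the lock.
         list_op_pending publishes which mutex we are operating on before
         the operation itself starts.  */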
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }
          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              int val = atomic_compare_and_exchange_val_acq
                (&mutex->__data.__lock, oldval | FUTEX_WAITERS, oldval);
              if (val != oldval)
                {
                  oldval = val;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;
          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
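    /* Priority-inheritance mutexes: the kernel boosts the owner's
       priority to that of the highest-priority waiter, so contended
       acquisition and blocking are delegated to FUTEX_LOCK_PI.  */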
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
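        /* Fast path: try to acquire the uncontended mutex in user space
           with a single CAS from 0 to our TID; only on contention do we
           enter the kernel.  */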
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything, blocking with the supplied timeout.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = __futex_lock_pi64 (&mutex->__data.__lock, clockid, abstime,
                                       private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
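    /* Priority-protection (priority ceiling) mutexes: the ceiling is
       stored in the upper bits of __lock, while the low bits hold the
       lock state (ceilval means free, ceilval | 1 locked, ceilval | 2
       locked with waiters).  */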
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                        (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                        clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
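/* ___pthread_mutex_clocklock64 is the entry point behind
   pthread_mutex_clocklock; it validates the clockid up front and then
   defers to the common code above.  */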
int
___pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                              clockid_t clockid,
                              const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}

#if __TIMESIZE == 64
strong_alias (___pthread_mutex_clocklock64, ___pthread_mutex_clocklock)
#else /* __TIMESIZE != 64 */
strong_alias (___pthread_mutex_clocklock64, __pthread_mutex_clocklock64)
libc_hidden_def (__pthread_mutex_clocklock64)
int
___pthread_mutex_clocklock (pthread_mutex_t *mutex,
                            clockid_t clockid,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return ___pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
#endif /* __TIMESIZE != 64 */
libc_hidden_ver (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#ifndef SHARED
strong_alias (___pthread_mutex_clocklock, __pthread_mutex_clocklock)
#endif
versioned_symbol (libc, ___pthread_mutex_clocklock,
                  pthread_mutex_clocklock, GLIBC_2_34);
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_30, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_clocklock,
               pthread_mutex_clocklock, GLIBC_2_30);
#endif
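/* pthread_mutex_timedlock is simply clocklock with the clock fixed to
   CLOCK_REALTIME, as POSIX specifies for absolute timeouts.  */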
int
___pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                              const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}

#if __TIMESIZE == 64
strong_alias (___pthread_mutex_timedlock64, ___pthread_mutex_timedlock)
#else /* __TIMESIZE != 64 */
strong_alias (___pthread_mutex_timedlock64, __pthread_mutex_timedlock64);
libc_hidden_def (__pthread_mutex_timedlock64)

int
___pthread_mutex_timedlock (pthread_mutex_t *mutex,
                            const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
#endif /* __TIMESIZE != 64 */
versioned_symbol (libc, ___pthread_mutex_timedlock,
                  pthread_mutex_timedlock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#ifndef SHARED
strong_alias (___pthread_mutex_timedlock, __pthread_mutex_timedlock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_2, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_timedlock,
               pthread_mutex_timedlock, GLIBC_2_2);
#endif