/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>
static int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }
      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;
    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    simple:
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
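
      /* Adaptive mutex: spin in user space for a bounded number of trylock
         attempts before falling back to the futex-based wait below.  The
         spin bound adapts to recent contention through __spins.  */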
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
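      /* Acquisition loop: CAS the lock word from 0 to our TID, take over a
         lock whose owner died (FUTEX_OWNER_DIED), or set FUTEX_WAITERS and
         block on the futex until the timeout expires.  */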
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }
          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;
          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }
      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
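        /* Priority-inheritance mutexes delegate contention handling to the
           kernel: FUTEX_LOCK_PI boosts the priority of the current owner
           while a higher-priority thread waits for the lock.  */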
        /* Currently futex FUTEX_LOCK_PI operation only provides support for
           CLOCK_REALTIME and trying to emulate by converting a
           CLOCK_MONOTONIC to CLOCK_REALTIME will take in account possible
           changes to the wall clock.  */
        if (__glibc_unlikely (clockid != CLOCK_REALTIME))
          return EINVAL;
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, abstime, private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;
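
        /* Priority-protect (ceiling) mutexes keep the configured priority
           ceiling in the upper bits of __lock and the lock state in the low
           bits: ceilval means unlocked, ceilval | 1 locked, and ceilval | 2
           locked with waiters.  */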
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;
            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
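
/* Lock MUTEX, but give up once the absolute timeout ABSTIME, measured
   against CLOCKID, has passed.  */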
int
__pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                             clockid_t clockid,
                             const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}
libpthread_hidden_def (__pthread_mutex_clocklock64)
int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)
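
/* pthread_mutex_timedlock is the same operation with the clock fixed to
   CLOCK_REALTIME.  */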
int
__pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                             const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}
libpthread_hidden_def (__pthread_mutex_timedlock64)
int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)