/* Copyright (C) 2002-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>
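/* ___pthread_mutex_trylock returns 0 on success and otherwise EBUSY, EDEADLK,
   EAGAIN, EOWNERDEAD, ENOTRECOVERABLE, or EINVAL, depending on the mutex
   type; lll_trylock comes from <lowlevellock.h> and the futex helpers from
   <futex-internal.h>.  */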
int
___pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
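  /* The caller's kernel TID identifies the owner: it is recorded in __owner
     and, for the robust and PI types, is also the value stored in the futex
     word itself.  */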
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }
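      /* Not the owner yet: try the low-level lock without blocking.  */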
      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
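      /* The elision cases attempt the acquisition transactionally (hardware
         lock elision); a successful elided trylock leaves the lock word
         untouched, which is why no ownership is recorded.  */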
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;
    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
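      /* The empty asm is only a compiler barrier: it keeps the store to
         list_op_pending from being reordered after the lock operation, so the
         kernel sees a consistent robust list if this thread dies mid-way.  */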
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);
              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }
              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
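              /* EOWNERDEAD still hands the lock to the caller, who is
                 expected to repair the protected state and call
                 pthread_mutex_consistent before unlocking.  */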
              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }
              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }
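        /* Bit 0 of the op_pending/robust-list entry is how the kernel tells a
           PI mutex apart when the owner dies, so it applies the PI hand-over
           protocol instead of a plain wake-up.  */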
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }
            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }
            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);
            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }
            oldval = mutex->__data.__lock;
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_fetch_and_acquire (&mutex->__data.__lock,
                                      ~FUTEX_OWNER_DIED);
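            /* After FUTEX_TRYLOCK_PI the kernel has stored our TID in the
               futex word with FUTEX_OWNER_DIED still set; clearing the flag
               completes the hand-over.  */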
            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;
            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;
            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
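            /* For priority-protected mutexes the ceiling lives in the high
               bits of __lock itself; the low bits track the lock state (the
               CAS below sets bit 0 to take the lock).  */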
            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }
            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;
            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);
            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }
        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
versioned_symbol (libc, ___pthread_mutex_trylock,
                  pthread_mutex_trylock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
#ifndef SHARED
strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
#endif
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_trylock,
               pthread_mutex_trylock, GLIBC_2_0);
compat_symbol (libpthread, ___pthread_mutex_trylock,
               __pthread_mutex_trylock, GLIBC_2_0);
#endif