/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
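
/* When the platform provides no lock elision, the fallback macros above
   reduce lll_trylock_elision to a plain lll_trylock and make
   FORCE_ELISION a no-op, so the elision cases below collapse into the
   ordinary trylock paths.  */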
int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
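      /* The __builtin_expect above only biases code layout toward
         PTHREAD_MUTEX_TIMED_NP, the common case; every mutex type is
         still handled below.  */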
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALLTHROUGH */
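      /* On targets with elision support, FORCE_ELISION may upgrade the
         mutex kind and take the goto above; otherwise it expands to
         nothing and control falls through to the plain trylock below.  */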
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
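      /* The asm statement above is a compiler barrier only.  The kernel
         reads list_op_pending if this thread dies, i.e. from this
         thread's own context, so compiler ordering of the store before
         the following lock accesses is sufficient.  */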
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }
              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
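        /* kind and robust are both derived from a single relaxed load of
           __kind so that they reflect one consistent snapshot; see the
           concurrency notes cited below.  */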
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt another
                   memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }
            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }
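        /* The kernel path above either handed us the lock or returned
           EWOULDBLOCK; oldval was reloaded so that the owner-died check
           below sees the current lock word.  */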
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */
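
      /* Priority-protected mutexes keep the priority ceiling in the lock
         word under PTHREAD_MUTEX_PRIO_CEILING_MASK; acquisition installs
         ceilval | 1, i.e. the ceiling with the low "locked" bit set.  */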
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }
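        /* Reaching this point means the CAS installed ceilval | 1: we own
           the mutex and the ceiling boost recorded via
           __pthread_tpp_change_priority is in effect.  */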
        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)
#endif
#endif
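
/* A minimal usage sketch (illustrative only, not part of glibc; it uses
   just the public pthread API): pthread_mutex_trylock returns 0 on
   success, EBUSY when the lock is already held, and EOWNERDEAD when a
   robust mutex's previous owner died while holding it.  */
#if 0
#include <pthread.h>
#include <errno.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static int
try_enter (void)
{
  int r = pthread_mutex_trylock (&m);
  if (r == 0)
    {
      /* ... critical section ... */
      pthread_mutex_unlock (&m);
    }
  else if (r == EOWNERDEAD)
    {
      /* Only possible for robust mutexes: repair the protected state,
         then mark the mutex consistent before unlocking.  */
      pthread_mutex_consistent (&m);
      pthread_mutex_unlock (&m);
    }
  return r;
}
#endif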