/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
/* Fallbacks for platforms without lock-elision support: elided trylock
   degrades to a plain trylock, and FORCE_ELISION becomes a no-op.
   Each #ifndef must be closed — the extracted source had dropped the
   matching #endif directives, which breaks preprocessing.  */
#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
34 __pthread_mutex_trylock (pthread_mutex_t
*mutex
)
37 pid_t id
= THREAD_GETMEM (THREAD_SELF
, tid
);
39 /* See concurrency notes regarding mutex type which is loaded from __kind
40 in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
41 switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex
),
42 PTHREAD_MUTEX_TIMED_NP
))
44 /* Recursive mutex. */
45 case PTHREAD_MUTEX_RECURSIVE_NP
|PTHREAD_MUTEX_ELISION_NP
:
46 case PTHREAD_MUTEX_RECURSIVE_NP
:
47 /* Check whether we already hold the mutex. */
48 if (mutex
->__data
.__owner
== id
)
50 /* Just bump the counter. */
51 if (__glibc_unlikely (mutex
->__data
.__count
+ 1 == 0))
52 /* Overflow of the counter. */
55 ++mutex
->__data
.__count
;
59 if (lll_trylock (mutex
->__data
.__lock
) == 0)
61 /* Record the ownership. */
62 mutex
->__data
.__owner
= id
;
63 mutex
->__data
.__count
= 1;
64 ++mutex
->__data
.__nusers
;
69 case PTHREAD_MUTEX_TIMED_ELISION_NP
:
70 elision
: __attribute__((unused
))
71 if (lll_trylock_elision (mutex
->__data
.__lock
,
72 mutex
->__data
.__elision
) != 0)
74 /* Don't record the ownership. */
77 case PTHREAD_MUTEX_TIMED_NP
:
78 FORCE_ELISION (mutex
, goto elision
);
80 case PTHREAD_MUTEX_ADAPTIVE_NP
:
81 case PTHREAD_MUTEX_ERRORCHECK_NP
:
82 if (lll_trylock (mutex
->__data
.__lock
) != 0)
85 /* Record the ownership. */
86 mutex
->__data
.__owner
= id
;
87 ++mutex
->__data
.__nusers
;
91 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
:
92 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
:
93 case PTHREAD_MUTEX_ROBUST_NORMAL_NP
:
94 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
:
95 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
96 &mutex
->__data
.__list
.__next
);
97 /* We need to set op_pending before starting the operation. Also
98 see comments at ENQUEUE_MUTEX. */
99 __asm ("" ::: "memory");
101 oldval
= mutex
->__data
.__lock
;
105 if ((oldval
& FUTEX_OWNER_DIED
) != 0)
107 /* The previous owner died. Try locking the mutex. */
108 int newval
= id
| (oldval
& FUTEX_WAITERS
);
111 = atomic_compare_and_exchange_val_acq (&mutex
->__data
.__lock
,
114 if (newval
!= oldval
)
120 /* We got the mutex. */
121 mutex
->__data
.__count
= 1;
122 /* But it is inconsistent unless marked otherwise. */
123 mutex
->__data
.__owner
= PTHREAD_MUTEX_INCONSISTENT
;
125 /* We must not enqueue the mutex before we have acquired it.
126 Also see comments at ENQUEUE_MUTEX. */
127 __asm ("" ::: "memory");
128 ENQUEUE_MUTEX (mutex
);
129 /* We need to clear op_pending after we enqueue the mutex. */
130 __asm ("" ::: "memory");
131 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
133 /* Note that we deliberately exit here. If we fall
134 through to the end of the function __nusers would be
135 incremented which is not correct because the old
136 owner has to be discounted. */
140 /* Check whether we already hold the mutex. */
141 if (__glibc_unlikely ((oldval
& FUTEX_TID_MASK
) == id
))
143 int kind
= PTHREAD_MUTEX_TYPE (mutex
);
144 if (kind
== PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
)
146 /* We do not need to ensure ordering wrt another memory
147 access. Also see comments at ENQUEUE_MUTEX. */
148 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
153 if (kind
== PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
)
155 /* We do not need to ensure ordering wrt another memory
157 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
160 /* Just bump the counter. */
161 if (__glibc_unlikely (mutex
->__data
.__count
+ 1 == 0))
162 /* Overflow of the counter. */
165 ++mutex
->__data
.__count
;
171 oldval
= atomic_compare_and_exchange_val_acq (&mutex
->__data
.__lock
,
173 if (oldval
!= 0 && (oldval
& FUTEX_OWNER_DIED
) == 0)
175 /* We haven't acquired the lock as it is already acquired by
176 another owner. We do not need to ensure ordering wrt another
178 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
183 if (__builtin_expect (mutex
->__data
.__owner
184 == PTHREAD_MUTEX_NOTRECOVERABLE
, 0))
186 /* This mutex is now not recoverable. */
187 mutex
->__data
.__count
= 0;
189 lll_unlock (mutex
->__data
.__lock
,
190 PTHREAD_ROBUST_MUTEX_PSHARED (mutex
));
191 /* FIXME This violates the mutex destruction requirements. See
192 __pthread_mutex_unlock_full. */
193 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
194 return ENOTRECOVERABLE
;
197 while ((oldval
& FUTEX_OWNER_DIED
) != 0);
199 /* We must not enqueue the mutex before we have acquired it.
200 Also see comments at ENQUEUE_MUTEX. */
201 __asm ("" ::: "memory");
202 ENQUEUE_MUTEX (mutex
);
203 /* We need to clear op_pending after we enqueue the mutex. */
204 __asm ("" ::: "memory");
205 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
207 mutex
->__data
.__owner
= id
;
208 ++mutex
->__data
.__nusers
;
209 mutex
->__data
.__count
= 1;
213 /* The PI support requires the Linux futex system call. If that's not
214 available, pthread_mutex_init should never have allowed the type to
215 be set. So it will get the default case for an invalid type. */
217 case PTHREAD_MUTEX_PI_RECURSIVE_NP
:
218 case PTHREAD_MUTEX_PI_ERRORCHECK_NP
:
219 case PTHREAD_MUTEX_PI_NORMAL_NP
:
220 case PTHREAD_MUTEX_PI_ADAPTIVE_NP
:
221 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
:
222 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
:
223 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
:
224 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
:
228 /* See concurrency notes regarding __kind in struct __pthread_mutex_s
229 in sysdeps/nptl/bits/thread-shared-types.h. */
230 int mutex_kind
= atomic_load_relaxed (&(mutex
->__data
.__kind
));
231 kind
= mutex_kind
& PTHREAD_MUTEX_KIND_MASK_NP
;
232 robust
= mutex_kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
;
237 /* Note: robust PI futexes are signaled by setting bit 0. */
238 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
239 (void *) (((uintptr_t) &mutex
->__data
.__list
.__next
)
241 /* We need to set op_pending before starting the operation. Also
242 see comments at ENQUEUE_MUTEX. */
243 __asm ("" ::: "memory");
246 oldval
= mutex
->__data
.__lock
;
248 /* Check whether we already hold the mutex. */
249 if (__glibc_unlikely ((oldval
& FUTEX_TID_MASK
) == id
))
251 if (kind
== PTHREAD_MUTEX_ERRORCHECK_NP
)
253 /* We do not need to ensure ordering wrt another memory
255 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
259 if (kind
== PTHREAD_MUTEX_RECURSIVE_NP
)
261 /* We do not need to ensure ordering wrt another memory
263 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
265 /* Just bump the counter. */
266 if (__glibc_unlikely (mutex
->__data
.__count
+ 1 == 0))
267 /* Overflow of the counter. */
270 ++mutex
->__data
.__count
;
277 = atomic_compare_and_exchange_val_acq (&mutex
->__data
.__lock
,
282 if ((oldval
& FUTEX_OWNER_DIED
) == 0)
284 /* We haven't acquired the lock as it is already acquired by
285 another owner. We do not need to ensure ordering wrt another
287 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
294 /* The mutex owner died. The kernel will now take care of
296 int private = (robust
297 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex
)
298 : PTHREAD_MUTEX_PSHARED (mutex
));
299 INTERNAL_SYSCALL_DECL (__err
);
300 int e
= INTERNAL_SYSCALL (futex
, __err
, 4, &mutex
->__data
.__lock
,
301 __lll_private_flag (FUTEX_TRYLOCK_PI
,
304 if (INTERNAL_SYSCALL_ERROR_P (e
, __err
)
305 && INTERNAL_SYSCALL_ERRNO (e
, __err
) == EWOULDBLOCK
)
307 /* The kernel has not yet finished the mutex owner death.
308 We do not need to ensure ordering wrt another memory
310 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
315 oldval
= mutex
->__data
.__lock
;
318 if (__glibc_unlikely (oldval
& FUTEX_OWNER_DIED
))
320 atomic_and (&mutex
->__data
.__lock
, ~FUTEX_OWNER_DIED
);
322 /* We got the mutex. */
323 mutex
->__data
.__count
= 1;
324 /* But it is inconsistent unless marked otherwise. */
325 mutex
->__data
.__owner
= PTHREAD_MUTEX_INCONSISTENT
;
327 /* We must not enqueue the mutex before we have acquired it.
328 Also see comments at ENQUEUE_MUTEX. */
329 __asm ("" ::: "memory");
330 ENQUEUE_MUTEX (mutex
);
331 /* We need to clear op_pending after we enqueue the mutex. */
332 __asm ("" ::: "memory");
333 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
335 /* Note that we deliberately exit here. If we fall
336 through to the end of the function __nusers would be
337 incremented which is not correct because the old owner
338 has to be discounted. */
343 && __builtin_expect (mutex
->__data
.__owner
344 == PTHREAD_MUTEX_NOTRECOVERABLE
, 0))
346 /* This mutex is now not recoverable. */
347 mutex
->__data
.__count
= 0;
349 INTERNAL_SYSCALL_DECL (__err
);
350 INTERNAL_SYSCALL (futex
, __err
, 4, &mutex
->__data
.__lock
,
351 __lll_private_flag (FUTEX_UNLOCK_PI
,
352 PTHREAD_ROBUST_MUTEX_PSHARED (mutex
)),
355 /* To the kernel, this will be visible after the kernel has
356 acquired the mutex in the syscall. */
357 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
358 return ENOTRECOVERABLE
;
363 /* We must not enqueue the mutex before we have acquired it.
364 Also see comments at ENQUEUE_MUTEX. */
365 __asm ("" ::: "memory");
366 ENQUEUE_MUTEX_PI (mutex
);
367 /* We need to clear op_pending after we enqueue the mutex. */
368 __asm ("" ::: "memory");
369 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
372 mutex
->__data
.__owner
= id
;
373 ++mutex
->__data
.__nusers
;
374 mutex
->__data
.__count
= 1;
378 #endif /* __NR_futex. */
380 case PTHREAD_MUTEX_PP_RECURSIVE_NP
:
381 case PTHREAD_MUTEX_PP_ERRORCHECK_NP
:
382 case PTHREAD_MUTEX_PP_NORMAL_NP
:
383 case PTHREAD_MUTEX_PP_ADAPTIVE_NP
:
385 /* See concurrency notes regarding __kind in struct __pthread_mutex_s
386 in sysdeps/nptl/bits/thread-shared-types.h. */
387 int kind
= atomic_load_relaxed (&(mutex
->__data
.__kind
))
388 & PTHREAD_MUTEX_KIND_MASK_NP
;
390 oldval
= mutex
->__data
.__lock
;
392 /* Check whether we already hold the mutex. */
393 if (mutex
->__data
.__owner
== id
)
395 if (kind
== PTHREAD_MUTEX_ERRORCHECK_NP
)
398 if (kind
== PTHREAD_MUTEX_RECURSIVE_NP
)
400 /* Just bump the counter. */
401 if (__glibc_unlikely (mutex
->__data
.__count
+ 1 == 0))
402 /* Overflow of the counter. */
405 ++mutex
->__data
.__count
;
411 int oldprio
= -1, ceilval
;
414 int ceiling
= (oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
)
415 >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
417 if (__pthread_current_priority () > ceiling
)
420 __pthread_tpp_change_priority (oldprio
, -1);
424 int retval
= __pthread_tpp_change_priority (oldprio
, ceiling
);
428 ceilval
= ceiling
<< PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
432 = atomic_compare_and_exchange_val_acq (&mutex
->__data
.__lock
,
433 ceilval
| 1, ceilval
);
435 if (oldval
== ceilval
)
438 while ((oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
) != ceilval
);
440 if (oldval
!= ceilval
)
442 __pthread_tpp_change_priority (oldprio
, -1);
446 assert (mutex
->__data
.__owner
== 0);
447 /* Record the ownership. */
448 mutex
->__data
.__owner
= id
;
449 ++mutex
->__data
.__nusers
;
450 mutex
->__data
.__count
= 1;
457 /* Correct code cannot set any other type. */
464 #ifndef __pthread_mutex_trylock
465 #ifndef pthread_mutex_trylock
466 weak_alias (__pthread_mutex_trylock
, pthread_mutex_trylock
)
467 hidden_def (__pthread_mutex_trylock
)