/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>

#ifndef lll_trylock_elision
#define lll_trylock_elision(a, t) lll_trylock (a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
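/* Illustrative note (not part of the original file): pthread_mutex_trylock
   attempts to acquire MUTEX without blocking.  It returns 0 on success and
   an error code otherwise, e.g. EBUSY when another thread holds the lock.
   A minimal caller sketch, assuming a mutex M initialized elsewhere:

     if (pthread_mutex_trylock (&m) == 0)
       {
         ... critical section ...
         pthread_mutex_unlock (&m);
       }
     else
       ... fall back to other work instead of blocking ...  */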
int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP | PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
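      /* Lock elision: where the platform supports hardware transactional
         memory, lll_trylock_elision may speculatively execute the critical
         section instead of taking the lock; otherwise it falls back to a
         plain lll_trylock (see the fallback definition above).  */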
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
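      /* Descriptive note: for robust mutexes the kernel walks this thread's
         robust list (robust_head) if the thread dies.  Publishing the mutex
         in list_op_pending before touching the lock word keeps a death in
         the middle of this operation recoverable; a compiler barrier
         suffices because the list is only consulted by the kernel in the
         context of this same thread.  */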
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);
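              /* Keep the FUTEX_WAITERS bit from the old value so that a
                 later unlock still wakes any threads queued on the futex.  */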
              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

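      /* The loop above only repeats while the CAS keeps observing a lock
         word with FUTEX_OWNER_DIED set, i.e. while recovery from a dead
         owner is still in progress.  Reaching this point means the CAS
         succeeded and we own the mutex.  */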
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
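      /* With priority inheritance (PI) the lock word holds the owner's TID
         plus status bits, and contended transitions go through the kernel,
         which can boost the owner's priority as needed.  */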
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);
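            /* FUTEX_TRYLOCK_PI asks the kernel to try to acquire the PI
               futex on our behalf; unlike a plain CAS it can also finish
               the cleanup after a dead previous owner.  */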
            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

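            /* Clearing FUTEX_OWNER_DIED records that we now own the mutex
               and take over responsibility for its state, which is reported
               to the caller as EOWNERDEAD below.  */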
            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
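            /* For priority-protected mutexes the current ceiling is encoded
               in dedicated bits of the lock word itself, so it is read and
               updated together with the lock state.  */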
            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

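        /* The loop retries only when the CAS failed because another thread
           changed the priority ceiling in the meantime; any other failure
           means the lock is busy and is handled below.  */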
        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)
#endif
#endif