/* Extraction artifact: the following three lines are the gitweb page
   header (an unrelated commit subject, the repository path, and the
   blob hash), not part of the source file itself.
     hurd: Also make it possible to call strlen very early
     [glibc.git] / nptl / pthread_mutex_trylock.c
     blob 9b3a16954debb03c5129505af3806bd9b215a584  */
1 /* Copyright (C) 2002-2023 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
4 The GNU C Library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU Lesser General Public
6 License as published by the Free Software Foundation; either
7 version 2.1 of the License, or (at your option) any later version.
9 The GNU C Library is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Lesser General Public License for more details.
14 You should have received a copy of the GNU Lesser General Public
15 License along with the GNU C Library; if not, see
16 <https://www.gnu.org/licenses/>. */
18 #include <assert.h>
19 #include <errno.h>
20 #include <stdlib.h>
21 #include "pthreadP.h"
22 #include <lowlevellock.h>
23 #include <futex-internal.h>
25 int
26 ___pthread_mutex_trylock (pthread_mutex_t *mutex)
28 int oldval;
29 pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
31 /* See concurrency notes regarding mutex type which is loaded from __kind
32 in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */
33 switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
34 PTHREAD_MUTEX_TIMED_NP))
36 /* Recursive mutex. */
37 case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
38 case PTHREAD_MUTEX_RECURSIVE_NP:
39 /* Check whether we already hold the mutex. */
40 if (mutex->__data.__owner == id)
42 /* Just bump the counter. */
43 if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
44 /* Overflow of the counter. */
45 return EAGAIN;
47 ++mutex->__data.__count;
48 return 0;
51 if (lll_trylock (mutex->__data.__lock) == 0)
53 /* Record the ownership. */
54 mutex->__data.__owner = id;
55 mutex->__data.__count = 1;
56 ++mutex->__data.__nusers;
57 return 0;
59 break;
61 case PTHREAD_MUTEX_TIMED_ELISION_NP:
62 elision: __attribute__((unused))
63 if (lll_trylock_elision (mutex->__data.__lock,
64 mutex->__data.__elision) != 0)
65 break;
66 /* Don't record the ownership. */
67 return 0;
69 case PTHREAD_MUTEX_TIMED_NP:
70 FORCE_ELISION (mutex, goto elision);
71 /*FALL THROUGH*/
72 case PTHREAD_MUTEX_ADAPTIVE_NP:
73 case PTHREAD_MUTEX_ERRORCHECK_NP:
74 if (lll_trylock (mutex->__data.__lock) != 0)
75 break;
77 /* Record the ownership. */
78 mutex->__data.__owner = id;
79 ++mutex->__data.__nusers;
81 return 0;
83 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
84 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
85 case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
86 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
87 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
88 &mutex->__data.__list.__next);
89 /* We need to set op_pending before starting the operation. Also
90 see comments at ENQUEUE_MUTEX. */
91 __asm ("" ::: "memory");
93 oldval = mutex->__data.__lock;
96 again:
97 if ((oldval & FUTEX_OWNER_DIED) != 0)
99 /* The previous owner died. Try locking the mutex. */
100 int newval = id | (oldval & FUTEX_WAITERS);
102 newval
103 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
104 newval, oldval);
106 if (newval != oldval)
108 oldval = newval;
109 goto again;
112 /* We got the mutex. */
113 mutex->__data.__count = 1;
114 /* But it is inconsistent unless marked otherwise. */
115 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
117 /* We must not enqueue the mutex before we have acquired it.
118 Also see comments at ENQUEUE_MUTEX. */
119 __asm ("" ::: "memory");
120 ENQUEUE_MUTEX (mutex);
121 /* We need to clear op_pending after we enqueue the mutex. */
122 __asm ("" ::: "memory");
123 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
125 /* Note that we deliberately exit here. If we fall
126 through to the end of the function __nusers would be
127 incremented which is not correct because the old
128 owner has to be discounted. */
129 return EOWNERDEAD;
132 /* Check whether we already hold the mutex. */
133 if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
135 int kind = PTHREAD_MUTEX_TYPE (mutex);
136 if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
138 /* We do not need to ensure ordering wrt another memory
139 access. Also see comments at ENQUEUE_MUTEX. */
140 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
141 NULL);
142 return EDEADLK;
145 if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
147 /* We do not need to ensure ordering wrt another memory
148 access. */
149 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
150 NULL);
152 /* Just bump the counter. */
153 if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
154 /* Overflow of the counter. */
155 return EAGAIN;
157 ++mutex->__data.__count;
159 return 0;
163 oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
164 id, 0);
165 if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
167 /* We haven't acquired the lock as it is already acquired by
168 another owner. We do not need to ensure ordering wrt another
169 memory access. */
170 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
172 return EBUSY;
175 if (__builtin_expect (mutex->__data.__owner
176 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
178 /* This mutex is now not recoverable. */
179 mutex->__data.__count = 0;
180 if (oldval == id)
181 lll_unlock (mutex->__data.__lock,
182 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
183 /* FIXME This violates the mutex destruction requirements. See
184 __pthread_mutex_unlock_full. */
185 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
186 return ENOTRECOVERABLE;
189 while ((oldval & FUTEX_OWNER_DIED) != 0);
191 /* We must not enqueue the mutex before we have acquired it.
192 Also see comments at ENQUEUE_MUTEX. */
193 __asm ("" ::: "memory");
194 ENQUEUE_MUTEX (mutex);
195 /* We need to clear op_pending after we enqueue the mutex. */
196 __asm ("" ::: "memory");
197 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
199 mutex->__data.__owner = id;
200 ++mutex->__data.__nusers;
201 mutex->__data.__count = 1;
203 return 0;
205 /* The PI support requires the Linux futex system call. If that's not
206 available, pthread_mutex_init should never have allowed the type to
207 be set. So it will get the default case for an invalid type. */
208 #ifdef __NR_futex
209 case PTHREAD_MUTEX_PI_RECURSIVE_NP:
210 case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
211 case PTHREAD_MUTEX_PI_NORMAL_NP:
212 case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
213 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
214 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
215 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
216 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
218 int kind, robust;
220 /* See concurrency notes regarding __kind in struct __pthread_mutex_s
221 in sysdeps/nptl/bits/thread-shared-types.h. */
222 int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
223 kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
224 robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
227 if (robust)
229 /* Note: robust PI futexes are signaled by setting bit 0. */
230 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
231 (void *) (((uintptr_t) &mutex->__data.__list.__next)
232 | 1));
233 /* We need to set op_pending before starting the operation. Also
234 see comments at ENQUEUE_MUTEX. */
235 __asm ("" ::: "memory");
238 oldval = mutex->__data.__lock;
240 /* Check whether we already hold the mutex. */
241 if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
243 if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
245 /* We do not need to ensure ordering wrt another memory
246 access. */
247 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
248 return EDEADLK;
251 if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
253 /* We do not need to ensure ordering wrt another memory
254 access. */
255 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
257 /* Just bump the counter. */
258 if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
259 /* Overflow of the counter. */
260 return EAGAIN;
262 ++mutex->__data.__count;
264 return 0;
268 oldval
269 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
270 id, 0);
272 if (oldval != 0)
274 if ((oldval & FUTEX_OWNER_DIED) == 0)
276 /* We haven't acquired the lock as it is already acquired by
277 another owner. We do not need to ensure ordering wrt another
278 memory access. */
279 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
281 return EBUSY;
284 assert (robust);
286 /* The mutex owner died. The kernel will now take care of
287 everything. */
288 int private = (robust
289 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
290 : PTHREAD_MUTEX_PSHARED (mutex));
291 int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
292 __lll_private_flag (FUTEX_TRYLOCK_PI,
293 private), 0, 0);
295 if (INTERNAL_SYSCALL_ERROR_P (e)
296 && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
298 /* The kernel has not yet finished the mutex owner death.
299 We do not need to ensure ordering wrt another memory
300 access. */
301 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
303 return EBUSY;
306 oldval = mutex->__data.__lock;
309 if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
311 atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
313 /* We got the mutex. */
314 mutex->__data.__count = 1;
315 /* But it is inconsistent unless marked otherwise. */
316 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
318 /* We must not enqueue the mutex before we have acquired it.
319 Also see comments at ENQUEUE_MUTEX. */
320 __asm ("" ::: "memory");
321 ENQUEUE_MUTEX (mutex);
322 /* We need to clear op_pending after we enqueue the mutex. */
323 __asm ("" ::: "memory");
324 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
326 /* Note that we deliberately exit here. If we fall
327 through to the end of the function __nusers would be
328 incremented which is not correct because the old owner
329 has to be discounted. */
330 return EOWNERDEAD;
333 if (robust
334 && __builtin_expect (mutex->__data.__owner
335 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
337 /* This mutex is now not recoverable. */
338 mutex->__data.__count = 0;
340 futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
341 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
343 /* To the kernel, this will be visible after the kernel has
344 acquired the mutex in the syscall. */
345 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
346 return ENOTRECOVERABLE;
349 if (robust)
351 /* We must not enqueue the mutex before we have acquired it.
352 Also see comments at ENQUEUE_MUTEX. */
353 __asm ("" ::: "memory");
354 ENQUEUE_MUTEX_PI (mutex);
355 /* We need to clear op_pending after we enqueue the mutex. */
356 __asm ("" ::: "memory");
357 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
360 mutex->__data.__owner = id;
361 ++mutex->__data.__nusers;
362 mutex->__data.__count = 1;
364 return 0;
366 #endif /* __NR_futex. */
368 case PTHREAD_MUTEX_PP_RECURSIVE_NP:
369 case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
370 case PTHREAD_MUTEX_PP_NORMAL_NP:
371 case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
373 /* See concurrency notes regarding __kind in struct __pthread_mutex_s
374 in sysdeps/nptl/bits/thread-shared-types.h. */
375 int kind = atomic_load_relaxed (&(mutex->__data.__kind))
376 & PTHREAD_MUTEX_KIND_MASK_NP;
378 oldval = mutex->__data.__lock;
380 /* Check whether we already hold the mutex. */
381 if (mutex->__data.__owner == id)
383 if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
384 return EDEADLK;
386 if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
388 /* Just bump the counter. */
389 if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
390 /* Overflow of the counter. */
391 return EAGAIN;
393 ++mutex->__data.__count;
395 return 0;
399 int oldprio = -1, ceilval;
402 int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
403 >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
405 if (__pthread_current_priority () > ceiling)
407 if (oldprio != -1)
408 __pthread_tpp_change_priority (oldprio, -1);
409 return EINVAL;
412 int retval = __pthread_tpp_change_priority (oldprio, ceiling);
413 if (retval)
414 return retval;
416 ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
417 oldprio = ceiling;
419 oldval
420 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
421 ceilval | 1, ceilval);
423 if (oldval == ceilval)
424 break;
426 while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
428 if (oldval != ceilval)
430 __pthread_tpp_change_priority (oldprio, -1);
431 break;
434 assert (mutex->__data.__owner == 0);
435 /* Record the ownership. */
436 mutex->__data.__owner = id;
437 ++mutex->__data.__nusers;
438 mutex->__data.__count = 1;
440 return 0;
442 break;
444 default:
445 /* Correct code cannot set any other type. */
446 return EINVAL;
449 return EBUSY;
451 versioned_symbol (libc, ___pthread_mutex_trylock,
452 pthread_mutex_trylock, GLIBC_2_34);
453 libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
454 #ifndef SHARED
455 strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
456 #endif
458 #if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
459 compat_symbol (libpthread, ___pthread_mutex_trylock,
460 pthread_mutex_trylock, GLIBC_2_0);
461 compat_symbol (libpthread, ___pthread_mutex_trylock,
462 __pthread_mutex_trylock, GLIBC_2_0);
463 #endif