/* nptl/pthread_mutex_trylock.c  */
/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>
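
/* Try to acquire MUTEX without blocking.  Returns 0 on success, EBUSY
   if the mutex is held by another thread, EDEADLK for an error-checking
   mutex the caller already owns, EAGAIN if a recursion counter would
   overflow, EOWNERDEAD/ENOTRECOVERABLE for robust mutexes whose previous
   owner died, and EINVAL for invalid types or priority-ceiling values.  */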

int
___pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
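
      /* Lock-elision variants: where the hardware supports it,
         lll_trylock_elision may elide the lock (e.g. via transactional
         execution) and falls back to a normal trylock otherwise;
         FORCE_ELISION below redirects eligible timed mutexes here.
         Ownership is deliberately not recorded on the elided path.  */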
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /*FALL THROUGH*/
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
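
      /* Robust mutexes: the acquiring thread links the mutex into its
         per-thread robust list so the kernel can set FUTEX_OWNER_DIED on
         the lock if the thread dies while holding it.  list_op_pending
         covers the window before the mutex is actually enqueued; the
         compiler barriers order those writes.  */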
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
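
      /* Retry until we either own the lock or see it held by a live
         owner.  If FUTEX_OWNER_DIED is set, the previous owner died and
         we take the lock over, reporting EOWNERDEAD.  */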
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }
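
          /* PTHREAD_MUTEX_NOTRECOVERABLE is set once an owner of an
             inconsistent robust mutex released it without first marking
             it consistent; from then on every acquisition attempt fails.  */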
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
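
        /* Fast path: try to move the lock word from 0 to our TID in user
           space.  Only if that fails because the previous owner died do we
           ask the kernel to finish the acquisition via FUTEX_TRYLOCK_PI.  */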
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt
                   another memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */
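
      /* Priority-protect (PTHREAD_PRIO_PROTECT) mutexes: the priority
         ceiling is stored in the lock word itself
         (PTHREAD_MUTEX_PRIO_CEILING_MASK), and the acquiring thread's
         priority is raised to that ceiling through
         __pthread_tpp_change_priority before the lock bit is set.  */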
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
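
        /* Take the lock by first raising this thread's priority to the
           ceiling recorded in the lock word, then CASing the lock bit in.
           If the ceiling changes concurrently the loop retries with the
           new value; on failure the priority boost is undone.  */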
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
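
/* Since glibc 2.34 the implementation lives in libc; the
   versioned_symbol/compat_symbol definitions below keep the historic
   libpthread GLIBC_2.0 entry points working.  */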
versioned_symbol (libc, ___pthread_mutex_trylock,
                  pthread_mutex_trylock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
#ifndef SHARED
strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_trylock,
               pthread_mutex_trylock, GLIBC_2_0);
compat_symbol (libpthread, ___pthread_mutex_trylock,
               __pthread_mutex_trylock, GLIBC_2_0);
#endif
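
/* Illustrative only, not part of glibc: a sketch of how a caller of a
   robust mutex typically reacts to the return values produced above.
   repair_shared_state () stands for hypothetical application code.

     int r = pthread_mutex_trylock (&m);
     if (r == 0)
       pthread_mutex_unlock (&m);          (acquired normally)
     else if (r == EOWNERDEAD)
       {
         repair_shared_state ();           (previous owner died mid-update)
         pthread_mutex_consistent (&m);    (otherwise unlocking marks the
                                            mutex ENOTRECOVERABLE)
         pthread_mutex_unlock (&m);
       }
     else if (r == EBUSY)
       ;                                   (held elsewhere; retry later)
*/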