nptl/pthread_mutex_trylock.c
/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
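
      /* Elided locks: PTHREAD_MUTEX_TIMED_NP jumps to the label below via
         FORCE_ELISION when the mutex has been upgraded to lock elision.
         An elided acquisition leaves __owner and __nusers untouched.  */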
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /*FALL THROUGH*/
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
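
      /* Robust mutexes: publish the pending operation in the per-thread
         robust list so the kernel can set FUTEX_OWNER_DIED on the lock if
         this thread exits while the acquisition is in progress.  */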
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
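      /* Priority-inheritance mutexes: try a user-space CAS first; only if
         the previous owner died is FUTEX_TRYLOCK_PI used so the kernel can
         resolve the ownership hand-over.  */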
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */
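
      /* Priority-protection mutexes encode the priority ceiling in the
         __lock word itself (PTHREAD_MUTEX_PRIO_CEILING_MASK).  */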
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
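
        /* Boost this thread to the mutex's priority ceiling before trying
           the lock; if the acquisition fails, the boost is undone with
           __pthread_tpp_change_priority (oldprio, -1).  */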
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
weak_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)
#endif
#endif