nptl/pthread_mutex_trylock.c
/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;
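
      /* Background note on lll_trylock (not in the original comments):
         it is the kernel-free fast path, an atomic compare-and-swap
         taking the low-level lock word from 0 (unlocked) to 1 (locked,
         no waiters); a nonzero return means the word was already set
         and the lock is held.  Both the recursive case above and the
         normal case below rely on it; no futex system call is made on
         this path.  */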
    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
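
      /* Background on the robust cases below (a summary of the Linux
         robust-futex ABI, added for the reader): the lock word holds
         the owner's TID in FUTEX_TID_MASK, with FUTEX_WAITERS and
         FUTEX_OWNER_DIED as the two high bits; the kernel sets
         FUTEX_OWNER_DIED when the owning thread exits without
         unlocking.  robust_head.list_op_pending was registered with
         the kernel via set_robust_list, so pointing it at this mutex
         before touching the lock word keeps the operation crash-safe:
         if this thread dies mid-acquire, the kernel still finds the
         mutex and marks it.  */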
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
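
      /* Background on the priority-inheritance (PI) cases below: with
         PI futexes the kernel must be able to identify the owner, so
         the lock word holds the owner's TID.  The uncontended trylock
         is a plain userspace CAS of 0 -> TID; only when the word is
         already nonzero does the code fall back to the
         FUTEX_TRYLOCK_PI system call, which also resolves the
         owner-died case on the kernel side.  */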
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
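
      /* Background on the priority-protection (PP) cases below: the
         mutex's current priority ceiling lives in the lock word under
         PTHREAD_MUTEX_PRIO_CEILING_MASK, above the lock bit.  Before
         the CAS, the thread raises itself to the ceiling via
         __pthread_tpp_change_priority; a caller whose priority already
         exceeds the ceiling gets EINVAL, as POSIX requires.  The loop
         re-reads the ceiling because pthread_mutex_setprioceiling may
         change it concurrently.  */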
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
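
/* Usage sketch (illustrative, not part of this file): a caller using
   trylock on a robust mutex must handle EOWNERDEAD by restoring the
   protected state and calling pthread_mutex_consistent before
   unlocking.  repair_shared_state is a hypothetical,
   application-specific helper:

     int r = pthread_mutex_trylock (&m);   // m: PTHREAD_MUTEX_ROBUST
     if (r == 0 || r == EOWNERDEAD)
       {
         if (r == EOWNERDEAD)
           {
             repair_shared_state ();       // hypothetical helper
             pthread_mutex_consistent (&m);
           }
         ... critical section ...
         pthread_mutex_unlock (&m);
       }
     else if (r == EBUSY)
       ... lock unavailable, try again later ...
*/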