/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
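
/* Lock MUTEX, blocking at most until the absolute deadline ABSTIME
   (measured against CLOCK_REALTIME).  Returns 0 on success, ETIMEDOUT
   if the deadline passes first, EINVAL for an invalid timeout or mutex
   type, EDEADLK for a second lock attempt on an error-checking mutex,
   EAGAIN on recursion-count overflow, and EOWNERDEAD or ENOTRECOVERABLE
   for robust mutexes whose previous owner died.  */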
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
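
      /* Adaptive mutex: on SMP machines, spin briefly in user space in
         the hope that the current holder releases the lock soon, and
         fall back to the futex-based timed lock once the spin budget is
         exhausted.  The budget adapts to how long acquisition took in
         the past (see the __spins update below).  */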
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
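
      /* Robust mutexes: the lock word holds the owner's TID so the
         kernel can hand the mutex over if the owner dies.  The mutex is
         registered in the per-thread robust list; list_op_pending marks
         it while the list update is still in progress.  */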
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
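
      /* Priority-inheritance mutexes: on contention the FUTEX_LOCK_PI
         operation lets the kernel queue the waiter and boost the owner's
         priority, so the lock word is managed together with the kernel
         instead of through lll_timedlock.  */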
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
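
      /* Priority-protect (priority ceiling) mutexes: before taking the
         lock the thread raises itself to the mutex's ceiling priority via
         __pthread_tpp_change_priority; the ceiling lives in the upper bits
         of the lock word while the low bits track locked/contended.  */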
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
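
/* Illustrative caller usage (a sketch, not required by this file): the
   deadline is an absolute CLOCK_REALTIME timestamp, so a caller typically
   does something like

     struct timespec deadline;
     clock_gettime (CLOCK_REALTIME, &deadline);
     deadline.tv_sec += 2;              // give up after two seconds
     int err = pthread_mutex_timedlock (&some_mutex, &deadline);
     if (err == ETIMEDOUT)
       ;                                // lock not acquired in time

   where some_mutex is a hypothetical pthread_mutex_t initialized
   elsewhere.  */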