[glibc.git] / nptl / pthread_mutex_timedlock.c
/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
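
/* Lock MUTEX like pthread_mutex_lock, but give up and return ETIMEDOUT
   once the absolute CLOCK_REALTIME deadline ABSTIME has passed.  */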
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
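
      /* Adaptive mutex: on SMP machines, spin with lll_trylock for a
         bounded number of iterations before falling back to the kernel
         wait in lll_timedlock.  */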
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
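
      /* Robust mutexes: the mutex is registered on the thread's robust
         list so that, if the owner dies, the kernel marks the lock word
         with FUTEX_OWNER_DIED and the next locker gets EOWNERDEAD.  */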
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
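
      /* Priority-inheritance mutexes: an uncontended lock is a single
         compare-and-swap; otherwise FUTEX_LOCK_PI lets the kernel queue
         this thread and boost the owner's priority.  */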
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
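
      /* Priority-protection (priority ceiling) mutexes: the thread's
         priority is raised to the mutex's ceiling via
         __pthread_tpp_change_priority before the lock word is taken.  */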
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}