[glibc.git] / nptl / pthread_mutex_timedlock.c
blob 8fd681c6effd048802df0b90cc825df03625bc16
/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
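
/* Caller-side sketch for orientation (illustrative only; `m' is a
   hypothetical pthread_mutex_t, not part of this file): ABSTIME is an
   absolute CLOCK_REALTIME deadline, not a relative timeout, so a
   typical caller computes it from the current time:

     struct timespec ts;
     clock_gettime (CLOCK_REALTIME, &ts);
     ts.tv_sec += 2;
     int err = pthread_mutex_timedlock (&m, &ts);

   ERR is 0 on success and ETIMEDOUT once the deadline has passed.  */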

int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
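
  /* ID is the kernel thread ID; for the robust and PI cases below it
     is also the value stored in the futex word to identify the
     owner.  */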

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      break;
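
      /* Adaptive mutex: on SMP machines spin in user space for a
         bounded number of iterations in the hope that the current
         owner releases the lock quickly, and fall back to the normal
         futex wait otherwise.  */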
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_mutex_trylock (mutex->__data.__lock) != 0);
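
          /* Adapt the spin limit: move __spins one eighth of the way
             toward the spin count just observed, so locks whose
             owners release quickly earn longer spins over time.  */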
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
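
      /* Publishing list_op_pending before the lock attempt lets the
         cleanup code see which robust mutex this operation concerned
         if the thread dies at any point below; it is reset on every
         exit path once the mutex is safely enqueued or the attempt is
         abandoned.  */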

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
                                               id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
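
      /* We acquired the mutex cleanly (the owner-died bit is clear),
         so it is consistent.  Record it in the robust list before
         clearing list_op_pending.  */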
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
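      /* Priority-inheritance mutexes: contended lock and unlock
         operations are handed to the kernel (FUTEX_LOCK_PI), which
         queues the waiters and boosts the owner's priority while
         higher-priority threads are blocked on the lock.  */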
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes the deadline as an
               absolute CLOCK_REALTIME value, so ABSTIME can be passed
               through unchanged.  */
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes
                       where the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
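      /* Priority-protection (priority ceiling) mutexes: while the
         mutex is held the owner runs at the ceiling priority stored
         in the high bits of the lock word, as configured with
         pthread_mutexattr_setprioceiling.  */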
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
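
            /* Somebody else holds the mutex.  The low bits of the
               lock word encode the state: CEILVAL means free,
               CEILVAL | 1 locked without waiters, CEILVAL | 2 locked
               with waiters.  Advertise ourselves as a waiter and
               block on the futex until the deadline.  */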
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt);
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}