/* nptl/pthread_mutex_timedlock.c  ([glibc.git], BZ #4647)  */
/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      break;
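
      /* Adaptive mutex: on SMP machines spin in user space for a bounded
         number of iterations before falling back to the futex-based
         timedlock; the spin budget adapts to past contention via __spins.  */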
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_mutex_trylock (mutex->__data.__lock) != 0);

          /* Fold the spin count just used into the running average.  */
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
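
      /* Robust mutexes: the kernel sets FUTEX_OWNER_DIED in the lock word
         when the owning thread exits without unlocking.  list_op_pending
         records the mutex we are about to acquire so the kernel can clean
         up if this thread dies in the middle of the operation.  */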
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
                                               id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
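
      /* Priority-inheritance (PI) mutexes: after a failed fast-path CAS the
         FUTEX_LOCK_PI operation is used, so the kernel queues the waiter,
         boosts the owner's priority, and hands the lock to us when it is
         released.  */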
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
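
      /* Priority-protection (PP) mutexes: the upper bits of the lock word
         hold the priority ceiling (PTHREAD_MUTEX_PRIO_CEILING_MASK); the low
         bits record the lock state, ceilval | 1 meaning locked and
         ceilval | 2 locked with waiters.  The ceiling is applied to this
         thread through __pthread_tpp_change_priority before the lock is
         taken.  */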
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          // XYZ check mutex flag
                                          LLL_SHARED);
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
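
Usage sketch (not part of the glibc file above): the function expects an
absolute CLOCK_REALTIME deadline, so a caller typically reads the current
time and adds the desired wait.  The function and variable names below are
illustrative only.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Try to take LOCK, giving up after roughly two seconds.  */
static int
lock_with_deadline (void)
{
  struct timespec abstime;

  /* pthread_mutex_timedlock interprets the timeout as an absolute
     CLOCK_REALTIME value, so start from the current wall-clock time.  */
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  int err = pthread_mutex_timedlock (&lock, &abstime);
  if (err == ETIMEDOUT)
    {
      fprintf (stderr, "lock not acquired within the deadline\n");
      return -1;
    }
  if (err != 0)
    {
      fprintf (stderr, "pthread_mutex_timedlock failed: %d\n", err);
      return -1;
    }

  /* ... critical section ...  */

  pthread_mutex_unlock (&lock);
  return 0;
}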