/* nptl/pthread_mutex_timedlock.c — from the GNU C Library (glibc).  */
/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
28 int
29 pthread_mutex_timedlock (mutex, abstime)
30 pthread_mutex_t *mutex;
31 const struct timespec *abstime;
33 int oldval;
34 pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
35 int result = 0;
37 /* We must not check ABSTIME here. If the thread does not block
38 abstime must not be checked for a valid value. */
40 switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
41 PTHREAD_MUTEX_TIMED_NP))
43 /* Recursive mutex. */
44 case PTHREAD_MUTEX_RECURSIVE_NP:
45 /* Check whether we already hold the mutex. */
46 if (mutex->__data.__owner == id)
48 /* Just bump the counter. */
49 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
50 /* Overflow of the counter. */
51 return EAGAIN;
53 ++mutex->__data.__count;
55 goto out;
58 /* We have to get the mutex. */
59 result = lll_timedlock (mutex->__data.__lock, abstime,
60 PTHREAD_MUTEX_PSHARED (mutex));
62 if (result != 0)
63 goto out;
65 /* Only locked once so far. */
66 mutex->__data.__count = 1;
67 break;
69 /* Error checking mutex. */
70 case PTHREAD_MUTEX_ERRORCHECK_NP:
71 /* Check whether we already hold the mutex. */
72 if (__builtin_expect (mutex->__data.__owner == id, 0))
73 return EDEADLK;
75 /* FALLTHROUGH */
77 case PTHREAD_MUTEX_TIMED_NP:
78 simple:
79 /* Normal mutex. */
80 result = lll_timedlock (mutex->__data.__lock, abstime,
81 PTHREAD_MUTEX_PSHARED (mutex));
82 break;
84 case PTHREAD_MUTEX_ADAPTIVE_NP:
85 if (! __is_smp)
86 goto simple;
88 if (lll_trylock (mutex->__data.__lock) != 0)
90 int cnt = 0;
91 int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
92 mutex->__data.__spins * 2 + 10);
95 if (cnt++ >= max_cnt)
97 result = lll_timedlock (mutex->__data.__lock, abstime,
98 PTHREAD_MUTEX_PSHARED (mutex));
99 break;
102 #ifdef BUSY_WAIT_NOP
103 BUSY_WAIT_NOP;
104 #endif
106 while (lll_trylock (mutex->__data.__lock) != 0);
108 mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
110 break;
112 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
113 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
114 case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
115 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
116 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
117 &mutex->__data.__list.__next);
119 oldval = mutex->__data.__lock;
122 again:
123 if ((oldval & FUTEX_OWNER_DIED) != 0)
125 /* The previous owner died. Try locking the mutex. */
126 int newval = id | (oldval & FUTEX_WAITERS);
128 newval
129 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
130 newval, oldval);
131 if (newval != oldval)
133 oldval = newval;
134 goto again;
137 /* We got the mutex. */
138 mutex->__data.__count = 1;
139 /* But it is inconsistent unless marked otherwise. */
140 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
142 ENQUEUE_MUTEX (mutex);
143 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
145 /* Note that we deliberately exit here. If we fall
146 through to the end of the function __nusers would be
147 incremented which is not correct because the old
148 owner has to be discounted. */
149 return EOWNERDEAD;
152 /* Check whether we already hold the mutex. */
153 if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
155 int kind = PTHREAD_MUTEX_TYPE (mutex);
156 if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
158 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
159 NULL);
160 return EDEADLK;
163 if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
165 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
166 NULL);
168 /* Just bump the counter. */
169 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
170 /* Overflow of the counter. */
171 return EAGAIN;
173 ++mutex->__data.__count;
175 return 0;
179 result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
180 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
182 if (__builtin_expect (mutex->__data.__owner
183 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
185 /* This mutex is now not recoverable. */
186 mutex->__data.__count = 0;
187 lll_unlock (mutex->__data.__lock,
188 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
189 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
190 return ENOTRECOVERABLE;
193 if (result == ETIMEDOUT || result == EINVAL)
194 goto out;
196 oldval = result;
198 while ((oldval & FUTEX_OWNER_DIED) != 0);
200 mutex->__data.__count = 1;
201 ENQUEUE_MUTEX (mutex);
202 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
203 break;
205 case PTHREAD_MUTEX_PI_RECURSIVE_NP:
206 case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
207 case PTHREAD_MUTEX_PI_NORMAL_NP:
208 case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
209 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
210 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
211 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
212 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
214 int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
215 int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
217 if (robust)
218 /* Note: robust PI futexes are signaled by setting bit 0. */
219 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
220 (void *) (((uintptr_t) &mutex->__data.__list.__next)
221 | 1));
223 oldval = mutex->__data.__lock;
225 /* Check whether we already hold the mutex. */
226 if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
228 if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
230 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
231 return EDEADLK;
234 if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
236 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
238 /* Just bump the counter. */
239 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
240 /* Overflow of the counter. */
241 return EAGAIN;
243 ++mutex->__data.__count;
245 return 0;
249 oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
250 id, 0);
252 if (oldval != 0)
254 /* The mutex is locked. The kernel will now take care of
255 everything. The timeout value must be a relative value.
256 Convert it. */
257 INTERNAL_SYSCALL_DECL (__err);
259 int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
260 FUTEX_LOCK_PI, 1, abstime);
261 if (INTERNAL_SYSCALL_ERROR_P (e, __err))
263 if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
264 return ETIMEDOUT;
266 if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
267 || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
269 assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
270 || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
271 && kind != PTHREAD_MUTEX_RECURSIVE_NP));
272 /* ESRCH can happen only for non-robust PI mutexes where
273 the owner of the lock died. */
274 assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
275 || !robust);
277 /* Delay the thread until the timeout is reached.
278 Then return ETIMEDOUT. */
279 struct timespec reltime;
280 struct timespec now;
282 INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
283 &now);
284 reltime.tv_sec = abstime->tv_sec - now.tv_sec;
285 reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
286 if (reltime.tv_nsec < 0)
288 reltime.tv_nsec += 1000000000;
289 --reltime.tv_sec;
291 if (reltime.tv_sec >= 0)
292 while (nanosleep_not_cancel (&reltime, &reltime) != 0)
293 continue;
295 return ETIMEDOUT;
298 return INTERNAL_SYSCALL_ERRNO (e, __err);
301 oldval = mutex->__data.__lock;
303 assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
306 if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
308 atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
310 /* We got the mutex. */
311 mutex->__data.__count = 1;
312 /* But it is inconsistent unless marked otherwise. */
313 mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
315 ENQUEUE_MUTEX_PI (mutex);
316 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
318 /* Note that we deliberately exit here. If we fall
319 through to the end of the function __nusers would be
320 incremented which is not correct because the old owner
321 has to be discounted. */
322 return EOWNERDEAD;
325 if (robust
326 && __builtin_expect (mutex->__data.__owner
327 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
329 /* This mutex is now not recoverable. */
330 mutex->__data.__count = 0;
332 INTERNAL_SYSCALL_DECL (__err);
333 INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
334 FUTEX_UNLOCK_PI, 0, 0);
336 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
337 return ENOTRECOVERABLE;
340 mutex->__data.__count = 1;
341 if (robust)
343 ENQUEUE_MUTEX_PI (mutex);
344 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
347 break;
349 case PTHREAD_MUTEX_PP_RECURSIVE_NP:
350 case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
351 case PTHREAD_MUTEX_PP_NORMAL_NP:
352 case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
354 int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
356 oldval = mutex->__data.__lock;
358 /* Check whether we already hold the mutex. */
359 if (mutex->__data.__owner == id)
361 if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
362 return EDEADLK;
364 if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
366 /* Just bump the counter. */
367 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
368 /* Overflow of the counter. */
369 return EAGAIN;
371 ++mutex->__data.__count;
373 return 0;
377 int oldprio = -1, ceilval;
380 int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
381 >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
383 if (__pthread_current_priority () > ceiling)
385 result = EINVAL;
386 failpp:
387 if (oldprio != -1)
388 __pthread_tpp_change_priority (oldprio, -1);
389 return result;
392 result = __pthread_tpp_change_priority (oldprio, ceiling);
393 if (result)
394 return result;
396 ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
397 oldprio = ceiling;
399 oldval
400 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
401 ceilval | 1, ceilval);
403 if (oldval == ceilval)
404 break;
408 oldval
409 = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
410 ceilval | 2,
411 ceilval | 1);
413 if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
414 break;
416 if (oldval != ceilval)
418 /* Reject invalid timeouts. */
419 if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
421 result = EINVAL;
422 goto failpp;
425 struct timeval tv;
426 struct timespec rt;
428 /* Get the current time. */
429 (void) __gettimeofday (&tv, NULL);
431 /* Compute relative timeout. */
432 rt.tv_sec = abstime->tv_sec - tv.tv_sec;
433 rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
434 if (rt.tv_nsec < 0)
436 rt.tv_nsec += 1000000000;
437 --rt.tv_sec;
440 /* Already timed out? */
441 if (rt.tv_sec < 0)
443 result = ETIMEDOUT;
444 goto failpp;
447 lll_futex_timed_wait (&mutex->__data.__lock,
448 ceilval | 2, &rt,
449 PTHREAD_MUTEX_PSHARED (mutex));
452 while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
453 ceilval | 2, ceilval)
454 != ceilval);
456 while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
458 assert (mutex->__data.__owner == 0);
459 mutex->__data.__count = 1;
461 break;
463 default:
464 /* Correct code cannot set any other type. */
465 return EINVAL;
468 if (result == 0)
470 /* Record the ownership. */
471 mutex->__data.__owner = id;
472 ++mutex->__data.__nusers;
475 out:
476 return result;