nptl/pthread_mutex_timedlock.c
/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>
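
/* If the platform does not provide lock-elision variants of the
   low-level lock operations, fall back to the plain versions.  */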
#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
			 const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for validity.  */
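
  /* Dispatch on the mutex type.  __builtin_expect hints that a plain
     timed mutex (PTHREAD_MUTEX_TIMED_NP) is the common case.  */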
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
			    PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  goto out;
	}

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
	goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
	return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
			      PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_timedlock_elision (mutex->__data.__lock,
				    mutex->__data.__spins,
				    abstime,
				    PTHREAD_MUTEX_PSHARED (mutex));
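
      /* Adaptive mutex: spin on the lock for a bounded number of
	 iterations before falling back to blocking in the kernel.  */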
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
	goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  result = lll_timedlock (mutex->__data.__lock, abstime,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (lll_trylock (mutex->__data.__lock) != 0);
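
	  /* Fold the observed spin count into the running average
	     (a 1/8-weight exponential moving average).  */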
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      break;
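
      /* Robust mutexes: keep the kernel informed via the robust list so
	 the lock can be recovered if the owner dies while holding it.  */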
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id | (oldval & FUTEX_WAITERS);

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);
	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  */
	      return EOWNERDEAD;
	    }
	  /* Check whether we already hold the mutex.  */
	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

		  return 0;
		}
	    }
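
	  /* Block until the lock becomes available or the timeout
	     expires.  lll_robust_timedlock yields 0 on success, an
	     error code, or the lock value with FUTEX_OWNER_DIED set,
	     in which case we loop back and reclaim the mutex above.  */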
	  result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
					 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }

	  if (result == ETIMEDOUT || result == EINVAL)
	    goto out;

	  oldval = result;
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
      /* The PI support requires the Linux futex system call.  If that's not
	 available, pthread_mutex_init should never have allowed the type to
	 be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;
	/* Check whether we already hold the mutex.  */
	if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

		return 0;
	      }
	  }
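
	/* Try a user-space acquisition first: a PI futex is free when the
	   lock word is 0, and the owner stores its TID there.  */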
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      id, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  FUTEX_LOCK_PI takes an absolute CLOCK_REALTIME
	       timeout, so abstime can be passed through unchanged.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);

	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1,
				      abstime);
	    if (INTERNAL_SYSCALL_ERROR_P (e, __err))
	      {
		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
		  return ETIMEDOUT;

		if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
		  {
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			    || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
				&& kind != PTHREAD_MUTEX_RECURSIVE_NP));
		    /* ESRCH can happen only for non-robust PI mutexes where
		       the owner of the lock died.  */
		    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
			    || !robust);
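
		    /* The owner died and the mutex is not robust, so the
		       lock can never be acquired.  Honor the timeout
		       anyway and only then report ETIMEDOUT.  */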
		    /* Delay the thread until the timeout is reached.
		       Then return ETIMEDOUT.  */
		    struct timespec reltime;
		    struct timespec now;

		    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
				      &now);
		    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
		    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
		    if (reltime.tv_nsec < 0)
		      {
			reltime.tv_nsec += 1000000000;
			--reltime.tv_sec;
		      }
		    if (reltime.tv_sec >= 0)
		      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
			continue;

		    return ETIMEDOUT;
		  }

		return INTERNAL_SYSCALL_ERRNO (e, __err);
	      }
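
	    /* The kernel acquired the futex on our behalf.  Reload the
	       lock word to see whether the previous owner died.  */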
	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }
	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  */
	    return EOWNERDEAD;
	  }
	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }
	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
#endif  /* __NR_futex.  */
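
      /* Priority-protected (PTHREAD_PRIO_PROTECT) mutexes: the lock word
	 carries the priority ceiling in its upper bits.  */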
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;
	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

		return 0;
	      }
	  }
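
	/* Repeat until the lock is acquired under the current ceiling:
	   first raise this thread's priority to the ceiling, then try to
	   take the lock with a compare-and-swap.  */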
	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		result = EINVAL;
	      failpp:
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return result;
	      }

	    result = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (result)
	      return result;
	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						     ceilval | 1, ceilval);

	    if (oldval == ceilval)
	      break;
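
	    /* The low bits of the lock word encode the state: ceilval
	       means unlocked, ceilval | 1 locked, and ceilval | 2 locked
	       with waiters.  */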
	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  {
		    /* Reject invalid timeouts.  */
		    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
		      {
			result = EINVAL;
			goto failpp;
		      }

		    struct timeval tv;
		    struct timespec rt;

		    /* Get the current time.  */
		    (void) __gettimeofday (&tv, NULL);

		    /* Compute relative timeout.  */
		    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
		    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
		    if (rt.tv_nsec < 0)
		      {
			rt.tv_nsec += 1000000000;
			--rt.tv_sec;
		      }

		    /* Already timed out?  */
		    if (rt.tv_sec < 0)
		      {
			result = ETIMEDOUT;
			goto failpp;
		      }

		    lll_futex_timed_wait (&mutex->__data.__lock,
					  ceilval | 2, &rt,
					  PTHREAD_MUTEX_PSHARED (mutex));
		  }
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}