[glibc.git] / nptl / pthread_mutex_timedlock.c
blob 07f0901e5266bb23a139fade66ff137ce635c46a
/* Copyright (C) 2002-2016 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
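
/* On architectures without hardware lock elision the macros above fall
   back to the plain lowlevellock operations and FORCE_ELISION expands to
   nothing, so the elision paths below compile into the ordinary timed
   lock.  */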

int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
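      /* PTHREAD_MUTEX_TYPE_ELISION yields the mutex kind including the
         elision bits, so elided and non-elided variants are dispatched
         from one switch; the expected common case is a plain timed
         mutex.  */
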
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
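      /* With elision the lock word is not written on the fast path, so
         there is no owner to record; a successful elided acquisition
         therefore returns directly instead of falling through to the
         ownership bookkeeping at the end of the function.  */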

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
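
          /* Adapt the spin count: move __spins one eighth of the way
             towards the number of iterations this acquisition needed, so
             lightly contended mutexes spin less and heavily contended
             ones spin more (bounded by MAX_ADAPTIVE_COUNT above).  */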
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
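      /* list_op_pending tells the kernel (and our own cleanup code) which
         robust mutex this thread is in the middle of acquiring, so the
         lock can still be recovered if the thread dies before the mutex
         is enqueued on its robust list.  */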

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);
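
        /* The CAS above is the uncontended fast path: if the lock word was
           zero it now holds our TID and the mutex is ours without entering
           the kernel.  Otherwise fall through to FUTEX_LOCK_PI below.  */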

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI expects an absolute CLOCK_REALTIME
               timeout, so ABSTIME is passed to the kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
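
                    /* In both cases the lock can never be acquired: the
                       owner either died on a non-robust mutex, or the
                       kernel detected a deadlock on a plain PI mutex.
                       Emulate the normal behaviour by sleeping until the
                       deadline and reporting a timeout.  */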

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
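
        /* For robust PI mutexes the kernel leaves FUTEX_OWNER_DIED set in
           the lock word when it hands us a mutex whose previous owner died;
           detect that here and report EOWNERDEAD.  */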
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
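
            /* For priority-protected mutexes the lock word encodes the
               priority ceiling in the bits covered by
               PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock state in the
               low bits: CEILVAL means unlocked, CEILVAL | 1 locked, and
               CEILVAL | 2 locked with waiters.  */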
            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
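
/* Usage sketch (not part of this file): callers reach this code through the
   public pthread_mutex_timedlock interface with an absolute CLOCK_REALTIME
   deadline, e.g.

     struct timespec abstime;
     clock_gettime (CLOCK_REALTIME, &abstime);
     abstime.tv_sec += 5;                  // give up after five seconds
     int err = pthread_mutex_timedlock (&mutex, &abstime);
     if (err == ETIMEDOUT)
       handle_timeout ();                  // hypothetical error handler
     else if (err == 0)
       pthread_mutex_unlock (&mutex);

   The function returns an error number directly instead of setting errno.  */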