/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
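
/* Callers pass an absolute CLOCK_REALTIME deadline.  A minimal
   illustrative sketch (not part of this file; `m' stands for a
   hypothetical initialized mutex):

     struct timespec ts;
     clock_gettime (CLOCK_REALTIME, &ts);
     ts.tv_sec += 2;
     int err = pthread_mutex_timedlock (&m, &ts);

   ERR is 0 on acquisition and ETIMEDOUT once the deadline passes.  */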
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
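
      /* Unlike the recursive case above, a relock by the owning thread
         is an error here rather than a count bump.  */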
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* FALLTHROUGH */
    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
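
      /* When lock elision is compiled in, FORCE_ELISION above can divert
         the default timed mutex to the transactional path below; note
         that __spins is reused as the elision adaptation count.  */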
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
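
      /* Adaptive mutex: spin in user space for a while in the hope that
         the owner releases the lock soon, then fall back to the futex;
         the spin budget adapts to recent contention via __spins.  */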
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
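
      /* Robust mutexes: the lock is registered on the thread's robust
         list so the kernel sets FUTEX_OWNER_DIED in the lock word if the
         owner dies.  The next locker then gets EOWNERDEAD and must make
         the state consistent (pthread_mutex_consistent) before
         unlocking, or the mutex becomes unrecoverable.  */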
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }
          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
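
      /* Priority inheritance mutexes: contended acquisition and release
         go through the kernel (FUTEX_LOCK_PI/FUTEX_UNLOCK_PI), which
         lends the waiters' priority to the owner while they block.  */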
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes the timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME can be passed
               through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
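                    /* Normalize the difference: a negative tv_nsec
                       borrows one second so that tv_nsec ends up in
                       [0, 999999999].  */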
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
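
      /* Priority protected mutexes: the priority ceiling lives in the
         high bits of the lock word (PTHREAD_MUTEX_PRIO_CEILING_MASK)
         while the low bits hold the lock state, so taking the lock and
         checking the ceiling is a single compare-and-exchange.  */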
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                goto out;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              goto out;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }
                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
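
  /* Every case that falls out of the switch ends up here; on success
     record the ownership, otherwise RESULT carries the error.  Paths
     that must not record an owner (elision, EOWNERDEAD) returned
     earlier.  */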
  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}