/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>
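
/* Unless the port provides real lock-elision implementations, the
   elision entry points below fall back to the plain low-level lock
   operations, so the elision cases in the switch cost nothing extra.  */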

#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */
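
  /* Dispatch on the mutex type, including the elision bits.  The
     __builtin_expect biases the code layout toward plain timed
     mutexes, the common case.  */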
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
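
      /* An adaptive mutex spins on the lock (with a pause hint between
         tries) for a bounded number of iterations in the hope that the
         owner releases it soon, and only then falls back to the
         sleeping timed lock; uniprocessors skip straight to the simple
         path.  */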
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
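
      /* Robust mutexes are linked into a per-thread list that the
         kernel walks when the thread exits, so a dead owner can be
         detected.  list_op_pending marks the mutex during the window
         in which this thread could die between acquiring the lock and
         updating its robust list.  */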
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
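      /* For PI mutexes the kernel arbitrates ownership.  The lock word
         holds the owner's TID, so an uncontended acquisition is a single
         CAS; contended acquisitions go through FUTEX_LOCK_PI, which lets
         the owner inherit a blocked waiter's priority.  */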
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute CLOCK_REALTIME
               timeout, so abstime can be passed through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
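
    /* Priority-protected mutexes: while such a mutex is held, its owner
       runs at least at the mutex's priority ceiling.  The ceiling is
       encoded in the lock word itself, next to the lock bits.  */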
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
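
            /* POSIX requires EINVAL if the caller's priority is above
               the ceiling.  Otherwise boost this thread to the ceiling
               priority before attempting to take the lock.  */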
            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                mutex->__data.__count = 0;
                __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}