/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#if defined(__UCLIBC_USE_TIME64__)
#include "internal/time64_helpers.h"
#endif
/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
attribute_optimize("Os")
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
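
      /* On a recursive re-lock we leave through the 'goto out' above:
         __owner and __nusers were already recorded when the mutex was
         acquired the first time, so the bookkeeping at the end of this
         function must be skipped.  */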
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
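
      /* The adaptive path keeps a per-mutex estimate in __data.__spins:
         each spin acquisition moves the estimate one eighth of the way
         toward the count just observed, so frequently contended mutexes
         learn to spin longer (bounded by MAX_ADAPTIVE_COUNT) before
         falling back to the kernel.  */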
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
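      /* Publishing the mutex in list_op_pending first means that if this
         thread dies before ENQUEUE_MUTEX completes, the kernel's robust
         list walk can still find the half-acquired mutex and mark it with
         FUTEX_OWNER_DIED for the next locker.  */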
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
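
          /* EOWNERDEAD hands the caller a lock that is marked
             inconsistent; the new owner is expected to repair the state
             and call pthread_mutex_consistent (pthread_mutex_consistent_np)
             before unlocking, otherwise the mutex becomes permanently
             unrecoverable.  */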
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            int e = INTERNAL_SYSCALL (futex_time64, __err, 4,
                                      &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      TO_TS64_P(abstime));
#else
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
#endif
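
            /* With FUTEX_LOCK_PI the kernel performs the whole wait: it
               queues us with priority inheritance, boosts the current
               owner while we block, and on success writes our TID into
               the lock word before returning.  */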
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_clock_gettime64)
                    struct __ts64_struct __now64;
                    INTERNAL_SYSCALL (clock_gettime64, __err, 2,
                                      CLOCK_REALTIME, &__now64);
                    now.tv_sec = __now64.tv_sec;
                    now.tv_nsec = __now64.tv_nsec;
#else
                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
#endif
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }
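
            /* nanosleep_not_cancel stores the unslept remainder back into
               RELTIME when it is interrupted, so the loop above keeps
               sleeping until the full timeout has elapsed and the caller
               reliably sees ETIMEDOUT.  */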
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
#if defined(__UCLIBC_USE_TIME64__) && defined(__NR_futex_time64)
            INTERNAL_SYSCALL (futex_time64, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#else
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);
#endif

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
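
      /* In the PP lock word the ceiling is kept in
         PTHREAD_MUTEX_PRIO_CEILING_MASK while the low bits encode the
         state: CEILVAL unlocked, CEILVAL | 1 locked, CEILVAL | 2 locked
         with waiters, which is the value the futex wait above blocks on.  */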
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
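
/* Usage sketch (caller side, not part of the library source): the
   deadline is an absolute CLOCK_REALTIME timestamp, not a relative
   interval.  'mtx' and 'handle_timeout' are hypothetical caller names.

     struct timespec ts;
     clock_gettime (CLOCK_REALTIME, &ts);
     ts.tv_sec += 2;
     int err = pthread_mutex_timedlock (&mtx, &ts);
     if (err == ETIMEDOUT)
       handle_timeout ();
*/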