/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
attribute_optimize("Os")
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for a valid value.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
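      /* The __builtin_expect above only hints that a plain timed mutex is
         the common case, so the compiler lays that branch out as the fast
         path; it does not change which case is taken.  */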
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }
      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
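      /* lll_timedlock returns zero once the lock word has been acquired
         and an error code such as ETIMEDOUT otherwise; failures are passed
         back to the caller unchanged.  */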
      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
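          /* Feed the number of iterations we actually spun back into
             __spins as an exponentially weighted moving average: each
             acquisition moves the estimate one eighth of the way toward
             the spin count just observed.  */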
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
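      /* From here on the operation is visible as pending: if this thread
         dies before the ENQUEUE_MUTEX below completes, the robust-list
         cleanup walk uses list_op_pending to find the half-acquired mutex
         and mark it with FUTEX_OWNER_DIED.  */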
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
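          /* lll_robust_timedlock returns zero on success, ETIMEDOUT or
             EINVAL on failure, or the lock value if the kernel set
             FUTEX_OWNER_DIED; the loop condition below retries in that
             last case so the mutex can be reclaimed.  */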
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
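        /* The low bit distinguishes PI from non-PI entries in the robust
           list, so the kernel knows it must apply the PI protocol when it
           recovers this pending entry after a thread death.  */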
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                               NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);
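        /* Fast path: an unlocked PI mutex has a zero lock word, and
           acquiring it is a single compare-and-swap that installs our TID.
           Only on contention do we enter the kernel below.  */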
        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute CLOCK_REALTIME
               timeout, so ABSTIME can be passed to the kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
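                    /* For these mutex kinds neither EDEADLK nor ESRCH may
                       be reported to the caller; the call has to behave as
                       if it simply never acquired the lock, i.e. block
                       until the timeout expires.  */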
                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
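        /* Even on success the kernel leaves FUTEX_OWNER_DIED set in the
           lock word if the previous owner died holding the mutex.  */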
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
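        /* The priority-protected lock word encodes the ceiling in
           PTHREAD_MUTEX_PRIO_CEILING_MASK and the lock state in the low
           bits: CEILVAL means free, CEILVAL | 1 locked, and CEILVAL | 2
           locked with waiters.  */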
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }
                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }
                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}