/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
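
/* pthread_mutex_timedlock -- lock MUTEX, giving up once the absolute
   CLOCK_REALTIME deadline ABSTIME has passed.  Returns 0 on success or an
   error code such as ETIMEDOUT, EDEADLK, EINVAL, EAGAIN, EOWNERDEAD or
   ENOTRECOVERABLE, depending on the mutex type.  */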

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

/* We need to build this function with optimization to avoid
 * lll_timedlock erroring out with
 * error: can't find a register in class ‘GENERAL_REGS’ while reloading ‘asm’
 */
int
attribute_optimize("Os")
pthread_mutex_timedlock (pthread_mutex_t *mutex,
                         const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
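
      /* Adaptive mutex: spin for a bounded number of lll_trylock attempts
         before falling back to the timed futex wait, and keep a running
         estimate of the typical hold time in __spins.  */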
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
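
      /* Robust mutexes: the mutex is recorded in the thread's robust list
         (robust_head.list_op_pending, ENQUEUE_MUTEX) so the kernel can set
         FUTEX_OWNER_DIED in the lock word if the owner dies; a waiter that
         then takes the lock over reports EOWNERDEAD.  */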
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
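
      /* Priority-inheritance mutexes: after a failed fast-path CAS the
         FUTEX_LOCK_PI operation is used, so the kernel queues the waiter,
         boosts the owner's priority and applies the timeout itself.  */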
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
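
        /* Try the fast path first: an atomic transition of the lock word
           from 0 (unlocked) to our TID.  Only if that fails is the kernel
           asked to acquire the lock on our behalf.  */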
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
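
      /* Priority-protected mutexes: the thread's priority is raised to the
         mutex's priority ceiling (via __pthread_tpp_change_priority) before
         the lock word, which also stores the ceiling, is acquired.  */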
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
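
        /* The lock word encodes both the priority ceiling (in the bits
           covered by PTHREAD_MUTEX_PRIO_CEILING_MASK) and the futex state
           in the low bits: ceilval | 1 means locked, ceilval | 2 means
           locked with waiters.  */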
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                __pthread_tpp_change_priority (oldprio, -1);
                goto out;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              goto out;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}