/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>
int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;
  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for a valid value.  */
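  /* POSIX specifies that the validity of ABSTIME need not be checked if
     the mutex can be locked immediately, so validation is left to the
     paths that actually block.  */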
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }
      /* We have to get the mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
      break;
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_mutex_timedlock (mutex->__data.__lock,
                                                abstime);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_mutex_trylock (mutex->__data.__lock) != 0);
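          /* Adapt the spin limit: move __spins one eighth of the way
             toward the number of iterations this acquisition needed
             (a simple exponential moving average).  */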
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
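      /* Until ENQUEUE_MUTEX below completes (or the operation is
         abandoned), list_op_pending lets the kernel's robust-list walk see
         that an acquisition of this mutex may be in flight if this thread
         dies.  */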
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }
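              /* The CAS above preserved the FUTEX_WAITERS bit from the dead
                 owner's lock word, so threads already blocked in the kernel
                 remain accounted for.  */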
              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
                                               id);
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
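      /* The lock word now holds our TID with FUTEX_OWNER_DIED clear;
         finish the robust protocol by enqueueing before clearing the
         pending pointer.  */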
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
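        /* The low bit distinguishes PI entries on the robust list, so that
           cleanup after a thread death can use the PI unlock protocol;
           ENQUEUE_MUTEX_PI below applies the same tag.  */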
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes the timeout as an absolute
               CLOCK_REALTIME value, so ABSTIME can be passed unchanged.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, abstime);
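            /* With FUTEX_LOCK_PI the kernel applies priority inheritance to
               the current owner while we block and, on success, stores our
               TID in the lock word itself.  */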
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }
            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;
            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                goto out;
              }
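            /* Raise this thread to the ceiling priority for the duration of
               the critical section; a caller already above the ceiling is
               violating the protocol, hence the EINVAL above.  */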
            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              goto out;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
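            /* The bits below the ceiling field encode the lock state:
               CEILVAL is free, CEILVAL | 1 locked, and CEILVAL | 2 locked
               with possible waiters.  */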
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);
                    /* Compute the relative timeout.  gettimeofday reports
                       microseconds, hence the scaling by 1000.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }
                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt);
                    // XYZ check mutex flag
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
    }

 out:
  return result;
}
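
/* Usage sketch (illustrative only, not part of the library): a caller
   builds an absolute CLOCK_REALTIME deadline and handles the EOWNERDEAD
   result that the robust paths above may return.  The helper name
   lock_with_deadline is made up for the example.

     #include <errno.h>
     #include <pthread.h>
     #include <time.h>

     static int
     lock_with_deadline (pthread_mutex_t *mtx, time_t seconds)
     {
       struct timespec deadline;
       clock_gettime (CLOCK_REALTIME, &deadline);
       deadline.tv_sec += seconds;

       int err = pthread_mutex_timedlock (mtx, &deadline);
       if (err == EOWNERDEAD)
         // The previous owner died; mark the state consistent again.
         err = pthread_mutex_consistent_np (mtx);
       return err;   // 0, ETIMEDOUT, EINVAL, EDEADLK, ...
     }
*/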