/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
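
/* A note on the guard above: a file that includes this one can predefine
   these macros to substitute a different low-level locking primitive
   before inclusion; in glibc the condition-variable variant
   (pthread_mutex_cond_lock.c) is built this way.  */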
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;


int
#ifdef NO_INCR
attribute_hidden internal_function
#endif
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
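  /* The __spins update above is an exponentially weighted moving average:
     each contended acquisition moves the stored spin count one eighth of
     the way toward the number of iterations just observed, so the mutex
     adapts its spinning to the typical hold time.  */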
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
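
/* Slow path: every mutex kind with extra state (robust, priority
   inheritance, priority protection) is funneled here by the type check at
   the top of __pthread_mutex_lock; the fast path above handles only the
   four plain kinds.  */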
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
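      /* The ordering in the robust paths above matters: list_op_pending is
         set before the lock word is modified and cleared only once
         ENQUEUE_MUTEX has linked the mutex into the thread's robust list,
         so a thread that dies mid-operation still leaves enough state for
         the kernel and the next locker to recover the mutex.  */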
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
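      /* In the PTHREAD_PRIO_PROTECT cases below, the priority ceiling is
         kept in the upper bits of the lock word itself
         (PTHREAD_MUTEX_PRIO_CEILING_MASK), so ceiling and lock state can
         be read and updated together with one compare-and-exchange.  */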
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
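      /* Lock-word states used by the loop above: CEILVAL means unlocked,
         CEILVAL | 1 locked with no waiters, and CEILVAL | 2 locked with
         waiters, which is the value the futex wait is keyed on.  */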
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
#ifdef NO_INCR
void
attribute_hidden internal_function
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif
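

/* Illustrative sketch, not part of the original file: a caller-side view
   of the recursive path implemented above, where a second lock by the
   owning thread only bumps __data.__count instead of deadlocking.  The
   guard macro name is invented for this example, and the block is meant
   to be copied into a standalone program; it never builds as part of the
   library.  */
#ifdef PTHREAD_MUTEX_LOCK_EXAMPLE
# include <pthread.h>
# include <stdio.h>

int
main (void)
{
  pthread_mutexattr_t attr;
  pthread_mutex_t m;

  pthread_mutexattr_init (&attr);
  pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init (&m, &attr);

  /* The first lock takes the futex; the second just increments the
     recursion counter.  */
  pthread_mutex_lock (&m);
  pthread_mutex_lock (&m);
  puts ("locked twice");

  /* Each lock needs a matching unlock before other threads can
     acquire the mutex.  */
  pthread_mutex_unlock (&m);
  pthread_mutex_unlock (&m);

  pthread_mutex_destroy (&m);
  pthread_mutexattr_destroy (&attr);
  return 0;
}
#endif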