/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
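
/* The defaults above can be overridden before this file is included;
   in glibc, pthread_mutex_cond_lock.c re-includes this file with its
   own LLL_MUTEX_* definitions and with NO_INCR defined, so that the
   condition-variable code gets a lock variant that does not increment
   __nusers.  */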

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
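
  /* Note the unusual __builtin_expect below: it evaluates to TYPE
     while telling the compiler that TYPE is most likely
     PTHREAD_MUTEX_TIMED_NP, so the comparison is predicted to
     succeed.  */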
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);
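
          /* Adapt the spin limit: move __spins an eighth of the way
             toward the number of iterations this acquisition needed,
             a cheap exponentially weighted average of recent
             contention.  */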
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
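
/* Slow path, handling the mutex types that carry extra state:
   robust, priority-inheritance (PI), and priority-protected (PP)
   mutexes.  */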
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
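
      /* Publishing the mutex in list_op_pending first means that if
         this thread dies anywhere in the locking sequence below, the
         kernel's robust-futex cleanup can still find the mutex and
         mark it with FUTEX_OWNER_DIED for the next locker.  */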

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
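
          /* A robust mutex becomes PTHREAD_MUTEX_NOTRECOVERABLE once a
             thread that took it over after its owner died released it
             without first calling pthread_mutex_consistent; from then
             on every lock attempt must fail.  */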
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);
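
            /* With FUTEX_LOCK_PI the kernel performs the whole
               acquisition: it stores the new owner's TID in the futex
               word and priority-boosts the current owner while higher
               priority waiters are blocked on the lock.  */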

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
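
            /* The futex word combines the ceiling (in the bits covered
               by PTHREAD_MUTEX_PRIO_CEILING_MASK) with the lock state
               in the low bits: CEILVAL means unlocked, CEILVAL | 1
               locked without waiters, and CEILVAL | 2 locked with
               waiters that must be woken at unlock time.  */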
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
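
/* Used by the condition-variable code (this function only exists in
   the NO_INCR compilation included from pthread_mutex_cond_lock.c)
   when the kernel has already made this thread the owner of a PI
   mutex, e.g. after a requeue-PI wakeup: the futex word already names
   us, so only the user-level bookkeeping needs adjusting.  */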

#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif