/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
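/* The low-level lock operations are overridable macros so that this
   file can be reused with different primitives; the condvar-internal
   lock variant presumably redefines them (and defines NO_INCR) before
   including this file.  */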
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
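/* The fast path below handles only the four basic mutex kinds; robust,
   priority-inheritance and priority-protected mutexes are handed off to
   the non-inlined __pthread_mutex_lock_full.  */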
int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          /* Let the spin limit adapt: move __spins an eighth of the
             way towards the count this acquisition needed.  */
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }
  /* Record the ownership.  */
  mutex->__data.__owner = id;
  ++mutex->__data.__nusers;

  return 0;
}
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
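  /* Robust, priority-inheritance (PI) and priority-protected (PP)
     mutexes each need their own acquisition protocol, so dispatch on
     the full type.  */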
  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
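      /* Registering the mutex in list_op_pending before touching the
         lock word means the robust-mutex cleanup machinery can still
         find it if this thread dies in the middle of the acquisition.  */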
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
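        /* For PI mutexes the lock word holds the owner's TID: an
           uncontended lock is taken with a single CAS below, and any
           contention is resolved by the kernel via FUTEX_LOCK_PI.  */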
        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);
        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
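        /* At this point either the CAS or the kernel has made us the
           owner; oldval may still carry FUTEX_OWNER_DIED if the previous
           owner died while holding the mutex.  */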
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
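        /* The lock word of a priority-protected mutex encodes the
           priority ceiling in a dedicated bit-field; the low bits carry
           the lock state (ceilval == free, ceilval | 1 == locked,
           ceilval | 2 == locked with waiters), as the loop below shows.  */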
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;
            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  /* Record the ownership.  */
  mutex->__data.__owner = id;
  ++mutex->__data.__nusers;

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
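/* __pthread_mutex_cond_lock_adjust is used by the condition-variable
   code: when the kernel hands a PI mutex directly to the woken waiter
   (the requeue-PI path), only the user-level ownership bookkeeping
   still needs fixing up, which is what this function does.  */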
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;
  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}