/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
#endif
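
/* Background note (not in the original file): these defaults cover the
   plain pthread_mutex_lock build; the #ifndef guard exists so the file
   can be re-included with the macros overridden.  In glibc of this era
   the re-inclusion site is, to the best of our knowledge,
   pthread_mutex_cond_lock.c, which maps them to the lll_mutex_cond_*
   variants and defines NO_INCR for the code paths guarded below.  */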

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
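
  /* Background note (not in the original file): in the robust, PI and
     PP cases below the __lock word doubles as the futex word the kernel
     sees.  For the robust and PI kinds it holds the owner's TID in the
     FUTEX_TID_MASK bits, with FUTEX_WAITERS and FUTEX_OWNER_DIED as
     flag bits that other threads and the kernel may set concurrently.  */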
  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      assert (mutex->__data.__owner == 0);
      break;
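
      /* Background note (not in the original file): an adaptive mutex
         spins in user space before sleeping in the kernel, betting that
         the owner runs on another CPU and will unlock soon.  The spin
         budget is __spins * 2 + 10, capped at MAX_ADAPTIVE_COUNT, and
         after each contended acquisition __spins moves one eighth of
         the way toward the iteration count actually needed.  */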
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex->__data.__lock);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
      break;
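
      /* Background note (not in the original file): robust mutexes are
         linked into a per-thread list registered with the kernel via
         set_robust_list.  list_op_pending is published before touching
         the lock word so that if this thread dies mid-operation the
         kernel can still find the mutex and mark it FUTEX_OWNER_DIED.  */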
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
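
      /* Background note (not in the original file): for the PI kinds
         the uncontended path is a single compare-and-swap of our TID
         into __lock; under contention FUTEX_LOCK_PI asks the kernel to
         queue us and to lend our priority to the current owner until
         it unlocks.  */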
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
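
      /* Background note (not in the original file): for the PP kinds
         the priority ceiling lives in the PTHREAD_MUTEX_PRIO_CEILING_MASK
         bits of __lock, so the low bits encode three states: ceilval
         (unlocked), ceilval | 1 (locked) and ceilval | 2 (locked with
         waiters).  The outer loop below re-reads the ceiling because it
         may be changed concurrently (pthread_mutex_setprioceiling).  */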
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2);
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}

#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif