[uClibc.git] libpthread/nptl/pthread_mutex_lock.c
/* Copyright (C) 2002-2007, 2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
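
/* The defaults above use the generic low-level lock operations; the
   #ifndef guard allows another build of this file to pre-define
   LLL_MUTEX_LOCK (and NO_INCR below) so the same code can be reused for
   internal locking, e.g. the mutex re-acquisition done on behalf of
   condition variables.  */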
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
int
#ifdef NO_INCR
attribute_hidden internal_function
#else
attribute_protected
#endif
__pthread_mutex_lock (
     pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);
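
  /* Only the four basic kinds (normal/timed, recursive, adaptive and
     error-checking) reach this point; robust, priority-inheritance and
     priority-protection mutexes were dispatched to
     __pthread_mutex_lock_full above.  */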
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
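  /* Adaptive mutexes spin briefly in user space before blocking in the
     kernel; the spin limit adapts to how many iterations recent
     acquisitions needed, via the __spins moving average updated below.  */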
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
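
/* Slow path: robust, priority-inheritance (PI) and priority-protection
   (PP) mutexes.  */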
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
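      /* Before touching the lock, point list_op_pending at this mutex's
         robust-list entry so that, should the thread die in the middle
         of the acquisition, the kernel's robust-list walk can still find
         the mutex and mark it with FUTEX_OWNER_DIED.  */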
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
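
    /* Priority-inheritance mutexes: the lock word holds the owner's TID
       and contention is resolved in the kernel via FUTEX_LOCK_PI, which
       boosts the owner's priority while higher-priority threads wait.  */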
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);
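
        /* If the user-space compare-and-swap above did not acquire the
           lock, FUTEX_LOCK_PI below lets the kernel take over: it queues
           this thread, applies priority inheritance, and stores our TID
           in the lock word once we own the mutex.  */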
        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
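
    /* Priority-protection mutexes: the lock word keeps the priority
       ceiling in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits and the lock
       state in the low bits (0 free, 1 locked, 2 locked with waiters);
       the thread raises its own priority to the ceiling before taking
       the lock.  */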
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;
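
            /* The lock is held: advertise a waiter by setting the low
               bits to 2 and sleep on the futex until the lock word drops
               back to the bare ceiling value.  */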
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
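
/* Fix up the bookkeeping for a PI mutex that the kernel has already
   locked on this thread's behalf (as happens with requeue-PI in
   pthread_cond_wait): only __owner and, for recursive mutexes, __count
   need adjusting here.  */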
#ifdef NO_INCR
void
attribute_hidden internal_function
__pthread_mutex_cond_lock_adjust (
     pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif