/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private)	({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif
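
/* On configurations without transactional lock elision these fallbacks
   reduce the elision entry points to the plain low-level lock: the
   try-lock argument is ignored, and lll_lock_elision evaluates to 0 to
   mimic a successful lll_lock.  */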

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
		   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
		   PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \
		   PTHREAD_MUTEX_PSHARED (mutex))
#endif
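
/* The LLL_MUTEX_* macros are override points.  For example,
   pthread_mutex_cond_lock.c redefines them (and defines NO_INCR) before
   re-including this file, so the algorithm below also builds the internal
   lock-on-behalf-of-a-condition-variable variant.  */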

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
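
/* FORCE_ELISION is a no-op unless the architecture provides a definition;
   on x86 with TSX, for instance, it may upgrade a default (timed) mutex
   to the elision type and branch to the elision path via the supplied
   statement.  */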

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
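  /* Illustrative only: a statically initialized mutex

       pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
       pthread_mutex_lock (&m);

     has type PTHREAD_MUTEX_TIMED_NP and therefore takes the fast path
     above: a single low-level lock on the futex word.  */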
#ifdef HAVE_ELISION
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
	 as the mutex type initialization functions will not
	 allow the elision flags to be set.  */
      /* Don't record the owner or user count for the elision case.
	 This is a tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
#endif
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  return 0;
	}

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
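  /* Illustrative only: the recursive behavior above must be requested
     explicitly, e.g.

       pthread_mutexattr_t a;
       pthread_mutexattr_init (&a);
       pthread_mutexattr_settype (&a, PTHREAD_MUTEX_RECURSIVE);
       pthread_mutex_init (&m, &a);

     after which the owning thread may relock the mutex; each lock bumps
     __count and must be matched by an unlock.  */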
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			  == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
	goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  LLL_MUTEX_LOCK (mutex);
		  break;
		}

#ifdef BUSY_WAIT_NOP
	      BUSY_WAIT_NOP;
#endif
	    }
	  while (LLL_MUTEX_TRYLOCK (mutex) != 0);

	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      assert (mutex->__data.__owner == 0);
    }
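  /* The __spins update above is a moving average with weight 1/8.  For
     example, starting from __spins == 0, an acquisition that spun
     cnt == 40 times yields __spins == 5, so the next contended attempt
     spins at most 5 * 2 + 10 == 20 times (assuming MAX_ADAPTIVE_COUNT,
     an architecture-specific constant, is at least 20) before falling
     back to the blocking LLL_MUTEX_LOCK.  */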
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
	return EDEADLK;
      goto simple;
    }

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
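
/* Illustrative only: with an error-checking mutex the relock attempt
   rejected above is reported instead of deadlocking.  Using a and m as in
   the recursive example:

     pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &a);
     pthread_mutex_lock (&m);
     int r = pthread_mutex_lock (&m);

   Here r == EDEADLK and the thread does not block.  */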

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
	{
	again:
	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS);
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  goto again;
		}

	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      ENQUEUE_MUTEX (mutex);
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }

	  /* Check whether we already hold the mutex.  */
	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }

	  oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

	  if (__builtin_expect (mutex->__data.__owner
				== PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	    {
	      /* This mutex is now not recoverable.  */
	      mutex->__data.__count = 0;
	      lll_unlock (mutex->__data.__lock,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	      return ENOTRECOVERABLE;
	    }
	}
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
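
      /* Illustrative only: a caller that receives EOWNERDEAD from the
         robust path above owns the mutex but must repair the protected
         state and mark the mutex consistent before unlocking:

           int r = pthread_mutex_lock (&m);
           if (r == EOWNERDEAD)
             {
               repair_shared_state ();   (hypothetical application helper)
               pthread_mutex_consistent (&m);
             }

         Unlocking without pthread_mutex_consistent makes the mutex
         permanently unrecoverable; later lockers get ENOTRECOVERABLE.  */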

      /* The PI support requires the Linux futex system call.  If that's not
	 available, pthread_mutex_init should never have allowed the type to
	 be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

	if (robust)
	  /* Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int newval = id;
# ifdef NO_INCR
	newval |= FUTEX_WAITERS;
# endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);

	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }

	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }

	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
# ifdef NO_INCR
	    --mutex->__data.__nusers;
# endif

	    return EOWNERDEAD;
	  }

	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }

	mutex->__data.__count = 1;
	if (robust)
	  {
	    ENQUEUE_MUTEX_PI (mutex);
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
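
      /* Illustrative only: priority inheritance is selected through the
         protocol attribute, e.g.

           pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_INHERIT);
           pthread_mutex_init (&m, &a);

         On contention the FUTEX_LOCK_PI path above lets the kernel boost
         the current owner to the priority of the highest-priority
         waiter.  */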
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }

	int oldprio = -1, ceilval;
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;

	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;

	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;
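
      /* Illustrative only: priority protection (the ceiling protocol) is
         selected with

           pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_PROTECT);
           pthread_mutexattr_setprioceiling (&a, 30);
           pthread_mutex_init (&m, &a);

         In the lock word above, the ceiling is stored in the
         PTHREAD_MUTEX_PRIO_CEILING_MASK bits while the low bits encode
         unlocked (ceilval), locked (ceilval | 1) and locked with waiters
         (ceilval | 2).  */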

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif
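
/* The function below is only built when this file is re-included with
   NO_INCR defined (see pthread_mutex_cond_lock.c).  It is used after a
   condition-variable wait on a priority-inheritance mutex: the kernel's
   requeue-PI operation has already made this thread the futex owner, so
   only the user-level ownership bookkeeping needs adjusting.  */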

#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (mutex)
     pthread_mutex_t *mutex;
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif