/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <stap-probe.h>
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private)	({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif
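
/* On targets without lock elision the macros above fall back to the
   plain low-level lock operations, so the elision code paths below
   compile to ordinary locking and need no further #ifdefs.  */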
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
		    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
		       PTHREAD_MUTEX_PSHARED (mutex))
#endif
#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
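
/* FORCE_ELISION defaults to a no-op; architectures that support lock
   elision (x86, for instance) override it to divert suitable
   PTHREAD_MUTEX_TIMED_NP locks to the elision path below.  */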
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);
  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);
  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
	 as the mutex type initialization functions will not
	 allow the elision flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
	 tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
	{
	  /* Just bump the counter.  */
	  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
	    /* Overflow of the counter.  */
	    return EAGAIN;

	  ++mutex->__data.__count;

	  return 0;
	}
      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
	goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
	{
	  int cnt = 0;
	  int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
			     mutex->__data.__spins * 2 + 10);
	  do
	    {
	      if (cnt++ >= max_cnt)
		{
		  LLL_MUTEX_LOCK (mutex);
		  break;
		}
	      atomic_spin_nop ();
	    }
	  while (LLL_MUTEX_TRYLOCK (mutex) != 0);
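
	  /* Adapt the spin budget: move __spins one eighth of the way
	     toward the count this acquisition needed, a fixed-point
	     exponentially weighted moving average.  A lock that is
	     usually released quickly thus earns a longer spin; one we
	     always end up sleeping on decays toward no spinning.  */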
	  mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
	}
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
	return EDEADLK;

      goto simple;
    }
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
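
/* Slow path, kept out of line: the robust, priority-inheritance (PI)
   and priority-protected (PP) mutex kinds.  */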
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
	 see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
	 FUTEX_WAITERS flag with other threads, and therefore need to keep it
	 set to avoid lost wake-ups.  We have the same requirement in the
	 simple mutex algorithm.
	 We start with value zero for a normal mutex, and FUTEX_WAITERS if we
	 are building the special case mutexes for use from within condition
	 variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
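      /* For a robust mutex the lock word holds the owner's TID (in
	 FUTEX_TID_MASK) plus the FUTEX_WAITERS and FUTEX_OWNER_DIED
	 flag bits; zero means unlocked.  */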
      while (1)
	{
	  /* Try to acquire the lock through a CAS from 0 (not acquired) to
	     our TID | assume_other_futex_waiters.  */
	  if (__glibc_likely ((oldval == 0)
			      && (atomic_compare_and_exchange_bool_acq
				  (&mutex->__data.__lock,
				   id | assume_other_futex_waiters, 0) == 0)))
	      break;

	  if ((oldval & FUTEX_OWNER_DIED) != 0)
	    {
	      /* The previous owner died.  Try locking the mutex.  */
	      int newval = id;
#ifdef NO_INCR
	      /* We are not taking assume_other_futex_waiters into account
		 here simply because we'll set FUTEX_WAITERS anyway.  */
	      newval |= FUTEX_WAITERS;
#else
	      newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

	      newval
		= atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						       newval, oldval);

	      if (newval != oldval)
		{
		  oldval = newval;
		  continue;
		}
	      /* We got the mutex.  */
	      mutex->__data.__count = 1;
	      /* But it is inconsistent unless marked otherwise.  */
	      mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	      /* We must not enqueue the mutex before we have acquired it.
		 Also see comments at ENQUEUE_MUTEX.  */
	      __asm ("" ::: "memory");
	      ENQUEUE_MUTEX (mutex);
	      /* We need to clear op_pending after we enqueue the mutex.  */
	      __asm ("" ::: "memory");
	      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	      /* Note that we deliberately exit here.  If we fall
		 through to the end of the function __nusers would be
		 incremented which is not correct because the old
		 owner has to be discounted.  If we are not supposed
		 to increment __nusers we actually have to decrement
		 it here.  */
#ifdef NO_INCR
	      --mutex->__data.__nusers;
#endif

	      return EOWNERDEAD;
	    }
	  /* Check whether we already hold the mutex.  */
	  if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	    {
	      int kind = PTHREAD_MUTEX_TYPE (mutex);
	      if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  Also see comments at ENQUEUE_MUTEX.  */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);
		  return EDEADLK;
		}

	      if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
		{
		  /* We do not need to ensure ordering wrt another memory
		     access.  */
		  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
				 NULL);

		  /* Just bump the counter.  */
		  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		    /* Overflow of the counter.  */
		    return EAGAIN;

		  ++mutex->__data.__count;

		  return 0;
		}
	    }
	  /* We cannot acquire the mutex nor has its owner died.  Thus, try
	     to block using futexes.  Set FUTEX_WAITERS if necessary so that
	     other threads are aware that there are potentially threads
	     blocked on the futex.  Restart if oldval changed in the
	     meantime.  */
	  if ((oldval & FUTEX_WAITERS) == 0)
	    {
	      if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
							oldval | FUTEX_WAITERS,
							oldval)
		  != 0)
		{
		  oldval = mutex->__data.__lock;
		  continue;
		}
	      oldval |= FUTEX_WAITERS;
	    }

	  /* It is now possible that we share the FUTEX_WAITERS flag with
	     another thread; therefore, update assume_other_futex_waiters so
	     that we do not forget about this when handling other cases
	     above and thus do not cause lost wake-ups.  */
	  assume_other_futex_waiters |= FUTEX_WAITERS;
	  /* Block using the futex and reload current lock value.  */
	  lll_futex_wait (&mutex->__data.__lock, oldval,
			  PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
	  oldval = mutex->__data.__lock;
	}
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	{
	  /* This mutex is now not recoverable.  */
	  mutex->__data.__count = 0;
	  int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
	  lll_unlock (mutex->__data.__lock, private);
	  /* FIXME This violates the mutex destruction requirements.  See
	     __pthread_mutex_unlock_full.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  return ENOTRECOVERABLE;
	}
      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
	 Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
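
	/* With priority inheritance the kernel arbitrates contended
	   acquisitions: the lock word stores the owner's TID, so the
	   kernel can identify the owner and boost its priority while
	   waiters block in FUTEX_LOCK_PI.  */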
	if (robust)
	  {
	    /* Note: robust PI futexes are signaled by setting bit 0.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			   (void *) (((uintptr_t) &mutex->__data.__list.__next)
				     | 1));
	    /* We need to set op_pending before starting the operation.  Also
	       see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	  }
	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
		return EDEADLK;
	      }

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* We do not need to ensure ordering wrt another memory
		   access.  */
		THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }
	int newval = id;
#ifdef NO_INCR
	newval |= FUTEX_WAITERS;
#endif
	oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
						      newval, 0);
	if (oldval != 0)
	  {
	    /* The mutex is locked.  The kernel will now take care of
	       everything.  */
	    int private = (robust
			   ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
			   : PTHREAD_MUTEX_PSHARED (mutex));
	    INTERNAL_SYSCALL_DECL (__err);
	    int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
				      __lll_private_flag (FUTEX_LOCK_PI,
							  private), 1, 0);

	    if (INTERNAL_SYSCALL_ERROR_P (e, __err)
		&& (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
		    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
	      {
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
			|| (kind != PTHREAD_MUTEX_ERRORCHECK_NP
			    && kind != PTHREAD_MUTEX_RECURSIVE_NP));
		/* ESRCH can happen only for non-robust PI mutexes where
		   the owner of the lock died.  */
		assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

		/* Delay the thread indefinitely.  */
		while (1)
		  pause_not_cancel ();
	      }
	    oldval = mutex->__data.__lock;

	    assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
	  }
	if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
	  {
	    atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

	    /* We got the mutex.  */
	    mutex->__data.__count = 1;
	    /* But it is inconsistent unless marked otherwise.  */
	    mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

	    /* Note that we deliberately exit here.  If we fall
	       through to the end of the function __nusers would be
	       incremented which is not correct because the old owner
	       has to be discounted.  If we are not supposed to
	       increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
	    --mutex->__data.__nusers;
#endif

	    return EOWNERDEAD;
	  }
	if (robust
	    && __builtin_expect (mutex->__data.__owner
				 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
	  {
	    /* This mutex is now not recoverable.  */
	    mutex->__data.__count = 0;

	    INTERNAL_SYSCALL_DECL (__err);
	    INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
			      __lll_private_flag (FUTEX_UNLOCK_PI,
						  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
			      0, 0);

	    /* To the kernel, this will be visible after the kernel has
	       acquired the mutex in the syscall.  */
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	    return ENOTRECOVERABLE;
	  }
	mutex->__data.__count = 1;
	if (robust)
	  {
	    /* We must not enqueue the mutex before we have acquired it.
	       Also see comments at ENQUEUE_MUTEX.  */
	    __asm ("" ::: "memory");
	    ENQUEUE_MUTEX_PI (mutex);
	    /* We need to clear op_pending after we enqueue the mutex.  */
	    __asm ("" ::: "memory");
	    THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
	  }
      }
      break;
#endif  /* __NR_futex.  */
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
	int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
	oldval = mutex->__data.__lock;

	/* Check whether we already hold the mutex.  */
	if (mutex->__data.__owner == id)
	  {
	    if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
	      return EDEADLK;

	    if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
	      {
		/* Just bump the counter.  */
		if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
		  /* Overflow of the counter.  */
		  return EAGAIN;

		++mutex->__data.__count;

		return 0;
	      }
	  }
	int oldprio = -1, ceilval;
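	/* Priority protection: the upper bits of the lock word hold the
	   mutex's priority ceiling.  Before acquiring the lock this
	   thread's priority is raised to that ceiling via
	   __pthread_tpp_change_priority; the low bits then act as a
	   simple lock (ceilval: free, ceilval | 1: locked,
	   ceilval | 2: locked with waiters).  */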
	do
	  {
	    int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
			  >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

	    if (__pthread_current_priority () > ceiling)
	      {
		if (oldprio != -1)
		  __pthread_tpp_change_priority (oldprio, -1);
		return EINVAL;
	      }

	    int retval = __pthread_tpp_change_priority (oldprio, ceiling);
	    if (retval)
	      return retval;
	    ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
	    oldprio = ceiling;

	    oldval
	      = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
						     ceilval | 2,
#else
						     ceilval | 1,
#endif
						     ceilval);

	    if (oldval == ceilval)
	      break;
	    do
	      {
		oldval
		  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							 ceilval | 2,
							 ceilval | 1);

		if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
		  break;

		if (oldval != ceilval)
		  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
				  PTHREAD_MUTEX_PSHARED (mutex));
	      }
	    while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
							ceilval | 2, ceilval)
		   != ceilval);
	  }
	while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
	assert (mutex->__data.__owner == 0);
	mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif

#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
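  /* Built only when NO_INCR is defined, i.e. in the condition-variable
     variant of this file (pthread_mutex_cond_lock.c).  Used where the
     kernel has already acquired a PI mutex on our behalf, so only the
     user-space ownership bookkeeping still has to be updated.  */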
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);
  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif