/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <stap-probe.h>
#ifndef lll_lock_elision
#define lll_lock_elision(lock, try_lock, private)	({ \
      lll_lock (lock, private); 0; })
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif
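/* Without architecture support for lock elision, the fallbacks above
   reduce the elision macros to the plain low-level lock operations, so
   the elision code paths below are safe on every target.  */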
/* Some of the following definitions differ when pthread_mutex_cond_lock.c
   includes this file.  */
#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0
# define LLL_MUTEX_LOCK_ELISION(mutex) \
  lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                    PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \
  lll_trylock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \
                       PTHREAD_MUTEX_PSHARED (mutex))
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
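/* FORCE_ELISION expands to nothing by default; architectures with
   hardware transactional memory (for example x86 with TSX) provide a
   definition that switches default mutexes over to the elision path.  */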
static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;
int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
                                 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
      /* Normal mutex.  */
    simple:
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
  elision: __attribute__((unused))
      /* This case can never happen on a system without elision,
         as the mutex type initialization functions will not
         allow the elision flags to be set.  */
      /* Don't record owner or users for the elision case.  This is a
         tail call.  */
      return LLL_MUTEX_LOCK_ELISION (mutex);
    }
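  /* LLL_MUTEX_LOCK_ELISION evaluates to 0 on success (see the fallback
     definition above), so the tail call keeps the usual return
     convention of pthread_mutex_lock.  */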
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
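  /* Adaptive mutexes spin in user space for a bounded number of
     iterations before blocking in the kernel; the spin limit adapts to
     how much spinning past acquisitions needed.  */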
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }
              atomic_spin_nop ();
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
  else
    {
      pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
      assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;
      goto simple;
    }
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
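/* Slow path, used for the less common mutex types: robust,
   priority-inheritance (PI), and priority-protected (PP) mutexes.  */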
static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep
         it set to avoid lost wake-ups.  We have the same requirement in
         the simple mutex algorithm.
         We start with value zero for a normal mutex, and FUTEX_WAITERS if
         we are building the special case mutexes for use from within
         condition variables.  */
      unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER;
      while (1)
        {
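          /* For robust mutexes the lock word holds the owner's TID in the
             low bits plus the FUTEX_WAITERS and FUTEX_OWNER_DIED flags;
             this is what lets the kernel detect and report a crashed
             owner.  */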
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }
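          /* The CAS returns the value it found, so on failure OLDVAL now
             holds the current lock word and the checks below dispatch on
             that fresh value.  */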
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              /* We are not taking assume_other_futex_waiters into account
                 here simply because we'll set FUTEX_WAITERS anyway.  */
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters;
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }
              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex and reload current lock value.  */
          lll_futex_wait (&mutex->__data.__lock, oldval,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          oldval = mutex->__data.__lock;
        }
      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }
      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;
        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
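        /* Try a user-space CAS first; if the mutex is already locked, let
           the kernel arbitrate via FUTEX_LOCK_PI, which boosts the
           priority of the current owner and hands the lock directly to a
           waiter on unlock.  */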
        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }
        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }
        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
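    /* Priority-protected (PP) mutexes implement POSIX
       PTHREAD_PRIO_PROTECT: before taking the lock, the thread raises its
       priority to the mutex's priority ceiling, which is stored in the
       high bits of the lock word.  */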
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
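        /* Lock word layout for PP mutexes: the ceiling sits above
           PTHREAD_MUTEX_PRIO_CEILING_SHIFT; in the low bits, 1 means
           locked and 2 means locked with potential waiters.  */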
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  LIBC_PROBE (mutex_acquired, 1, mutex);

  return 0;
}
#ifndef __pthread_mutex_lock
weak_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)
#endif
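/* When this file is included from pthread_mutex_cond_lock.c, NO_INCR is
   defined and the helper below is compiled in.  It fixes up ownership
   after a condition-variable waiter has been requeued to a PI mutex and
   woken up by the kernel already holding the lock.  */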
#ifdef NO_INCR
void
__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex)
{
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0);
  assert ((mutex->__data.__kind & PTHREAD_MUTEX_PSHARED_BIT) == 0);

  /* Record the ownership.  */
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  mutex->__data.__owner = id;

  if (mutex->__data.__kind == PTHREAD_MUTEX_PI_RECURSIVE_NP)
    ++mutex->__data.__count;
}
#endif