1 /* Copyright (C) 2002-2016 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
/* Fallback for configurations without lock elision: a plain low-level
   unlock that always reports success.  */
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
32 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
33 __attribute_noinline__
;
36 internal_function attribute_hidden
37 __pthread_mutex_unlock_usercnt (pthread_mutex_t
*mutex
, int decr
)
39 int type
= PTHREAD_MUTEX_TYPE_ELISION (mutex
);
40 if (__builtin_expect (type
&
41 ~(PTHREAD_MUTEX_KIND_MASK_NP
|PTHREAD_MUTEX_ELISION_FLAGS_NP
), 0))
42 return __pthread_mutex_unlock_full (mutex
, decr
);
44 if (__builtin_expect (type
, PTHREAD_MUTEX_TIMED_NP
)
45 == PTHREAD_MUTEX_TIMED_NP
)
47 /* Always reset the owner field. */
49 mutex
->__data
.__owner
= 0;
52 --mutex
->__data
.__nusers
;
55 lll_unlock (mutex
->__data
.__lock
, PTHREAD_MUTEX_PSHARED (mutex
));
57 LIBC_PROBE (mutex_release
, 1, mutex
);
61 else if (__glibc_likely (type
== PTHREAD_MUTEX_TIMED_ELISION_NP
))
63 /* Don't reset the owner/users fields for elision. */
64 return lll_unlock_elision (mutex
->__data
.__lock
, mutex
->__data
.__elision
,
65 PTHREAD_MUTEX_PSHARED (mutex
));
67 else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex
)
68 == PTHREAD_MUTEX_RECURSIVE_NP
, 1))
70 /* Recursive mutex. */
71 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
74 if (--mutex
->__data
.__count
!= 0)
75 /* We still hold the mutex. */
79 else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex
)
80 == PTHREAD_MUTEX_ADAPTIVE_NP
, 1))
84 /* Error checking mutex. */
85 assert (type
== PTHREAD_MUTEX_ERRORCHECK_NP
);
86 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
87 || ! lll_islocked (mutex
->__data
.__lock
))
96 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
100 switch (PTHREAD_MUTEX_TYPE (mutex
))
102 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
:
103 /* Recursive mutex. */
104 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
105 == THREAD_GETMEM (THREAD_SELF
, tid
)
106 && __builtin_expect (mutex
->__data
.__owner
107 == PTHREAD_MUTEX_INCONSISTENT
, 0))
109 if (--mutex
->__data
.__count
!= 0)
110 /* We still hold the mutex. */
111 return ENOTRECOVERABLE
;
116 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
119 if (--mutex
->__data
.__count
!= 0)
120 /* We still hold the mutex. */
125 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
:
126 case PTHREAD_MUTEX_ROBUST_NORMAL_NP
:
127 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
:
128 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
129 != THREAD_GETMEM (THREAD_SELF
, tid
)
130 || ! lll_islocked (mutex
->__data
.__lock
))
133 /* If the previous owner died and the caller did not succeed in
134 making the state consistent, mark the mutex as unrecoverable
135 and make all waiters. */
136 if (__builtin_expect (mutex
->__data
.__owner
137 == PTHREAD_MUTEX_INCONSISTENT
, 0))
139 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
142 /* Remove mutex from the list. */
143 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
144 &mutex
->__data
.__list
.__next
);
145 DEQUEUE_MUTEX (mutex
);
147 mutex
->__data
.__owner
= newowner
;
150 --mutex
->__data
.__nusers
;
153 lll_robust_unlock (mutex
->__data
.__lock
,
154 PTHREAD_ROBUST_MUTEX_PSHARED (mutex
));
156 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
159 /* The PI support requires the Linux futex system call. If that's not
160 available, pthread_mutex_init should never have allowed the type to
161 be set. So it will get the default case for an invalid type. */
163 case PTHREAD_MUTEX_PI_RECURSIVE_NP
:
164 /* Recursive mutex. */
165 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
168 if (--mutex
->__data
.__count
!= 0)
169 /* We still hold the mutex. */
171 goto continue_pi_non_robust
;
173 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
:
174 /* Recursive mutex. */
175 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
176 == THREAD_GETMEM (THREAD_SELF
, tid
)
177 && __builtin_expect (mutex
->__data
.__owner
178 == PTHREAD_MUTEX_INCONSISTENT
, 0))
180 if (--mutex
->__data
.__count
!= 0)
181 /* We still hold the mutex. */
182 return ENOTRECOVERABLE
;
184 goto pi_notrecoverable
;
187 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
190 if (--mutex
->__data
.__count
!= 0)
191 /* We still hold the mutex. */
194 goto continue_pi_robust
;
196 case PTHREAD_MUTEX_PI_ERRORCHECK_NP
:
197 case PTHREAD_MUTEX_PI_NORMAL_NP
:
198 case PTHREAD_MUTEX_PI_ADAPTIVE_NP
:
199 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
:
200 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
:
201 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
:
202 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
203 != THREAD_GETMEM (THREAD_SELF
, tid
)
204 || ! lll_islocked (mutex
->__data
.__lock
))
207 /* If the previous owner died and the caller did not succeed in
208 making the state consistent, mark the mutex as unrecoverable
209 and make all waiters. */
210 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0
211 && __builtin_expect (mutex
->__data
.__owner
212 == PTHREAD_MUTEX_INCONSISTENT
, 0))
214 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
216 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0)
219 /* Remove mutex from the list.
220 Note: robust PI futexes are signaled by setting bit 0. */
221 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
222 (void *) (((uintptr_t) &mutex
->__data
.__list
.__next
)
224 DEQUEUE_MUTEX (mutex
);
227 continue_pi_non_robust
:
228 mutex
->__data
.__owner
= newowner
;
231 --mutex
->__data
.__nusers
;
233 /* Unlock. Load all necessary mutex data before releasing the mutex
234 to not violate the mutex destruction requirements (see
236 int robust
= mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
;
237 int private = (robust
238 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex
)
239 : PTHREAD_MUTEX_PSHARED (mutex
));
240 /* Unlock the mutex using a CAS unless there are futex waiters or our
241 TID is not the value of __lock anymore, in which case we let the
242 kernel take care of the situation. Use release MO in the CAS to
243 synchronize with acquire MO in lock acquisitions. */
244 int l
= atomic_load_relaxed (&mutex
->__data
.__lock
);
247 if (((l
& FUTEX_WAITERS
) != 0)
248 || (l
!= THREAD_GETMEM (THREAD_SELF
, tid
)))
250 INTERNAL_SYSCALL_DECL (__err
);
251 INTERNAL_SYSCALL (futex
, __err
, 2, &mutex
->__data
.__lock
,
252 __lll_private_flag (FUTEX_UNLOCK_PI
, private));
256 while (!atomic_compare_exchange_weak_release (&mutex
->__data
.__lock
,
259 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
261 #endif /* __NR_futex. */
263 case PTHREAD_MUTEX_PP_RECURSIVE_NP
:
264 /* Recursive mutex. */
265 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
268 if (--mutex
->__data
.__count
!= 0)
269 /* We still hold the mutex. */
273 case PTHREAD_MUTEX_PP_ERRORCHECK_NP
:
274 /* Error checking mutex. */
275 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
276 || (mutex
->__data
.__lock
& ~ PTHREAD_MUTEX_PRIO_CEILING_MASK
) == 0)
280 case PTHREAD_MUTEX_PP_NORMAL_NP
:
281 case PTHREAD_MUTEX_PP_ADAPTIVE_NP
:
282 /* Always reset the owner field. */
284 mutex
->__data
.__owner
= 0;
288 --mutex
->__data
.__nusers
;
290 /* Unlock. Use release MO in the CAS to synchronize with acquire MO in
291 lock acquisitions. */
293 int oldval
= atomic_load_relaxed (&mutex
->__data
.__lock
);
296 newval
= oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
;
298 while (!atomic_compare_exchange_weak_release (&mutex
->__data
.__lock
,
301 if ((oldval
& ~PTHREAD_MUTEX_PRIO_CEILING_MASK
) > 1)
302 lll_futex_wake (&mutex
->__data
.__lock
, 1,
303 PTHREAD_MUTEX_PSHARED (mutex
));
305 int oldprio
= newval
>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
307 LIBC_PROBE (mutex_release
, 1, mutex
);
309 return __pthread_tpp_change_priority (oldprio
, -1);
312 /* Correct code cannot set any other type. */
316 LIBC_PROBE (mutex_release
, 1, mutex
);
322 __pthread_mutex_unlock (pthread_mutex_t
*mutex
)
324 return __pthread_mutex_unlock_usercnt (mutex
, 1);
326 strong_alias (__pthread_mutex_unlock
, pthread_mutex_unlock
)
327 hidden_def (__pthread_mutex_unlock
)