/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
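
/* Architectures without lock elision do not define lll_unlock_elision;
   the fallback below simply performs a plain low-level unlock and
   evaluates to 0 (success).  */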
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
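
/* Unlock MUTEX; if DECR is nonzero, also drop one reference from the
   user count.  Only the common kinds (normal/timed, recursive, adaptive,
   error-checking and their elided variants) are handled here; all other
   kinds are passed on to __pthread_mutex_unlock_full.  Returns 0 on
   success or an errno value such as EPERM.  */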
int
internal_function attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
			& ~(PTHREAD_MUTEX_KIND_MASK_NP
			    | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
				 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      goto normal;
    }
}
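
/* Slow path: robust, priority-inheritance (PI) and priority-protected (PP)
   mutex kinds.  Reached from __pthread_mutex_unlock_usercnt whenever bits
   outside the plain kind mask are set.  */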
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
	 comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
	 FUTEX_WAITERS set previously, then wake any waiters.
	 The unlock operation must be the last access to the mutex to not
	 violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
			     & FUTEX_WAITERS) != 0))
	lll_futex_wake (&mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
	 FIXME However, this violates the mutex destruction requirements
	 because another thread could acquire the mutex, destroy it, and
	 reuse the memory for something else; then, if this thread crashes,
	 and the memory happens to have a value equal to the TID, the kernel
	 will believe it is still related to the mutex (which has been
	 destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
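
      /* The per-thread robust list updated via DEQUEUE_MUTEX above (and
	 ENQUEUE_MUTEX at lock time) is the list the kernel walks if this
	 thread exits while holding robust mutexes: it sets FUTEX_OWNER_DIED
	 on each listed futex word so the next locker sees EOWNERDEAD.  */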
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto pi_notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
	{
	continue_pi_robust:
	  /* Remove mutex from the list.
	     Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));
	  /* We must set op_pending before we dequeue the mutex.  Also see
	     comments at ENQUEUE_MUTEX.  */
	  __asm ("" ::: "memory");
	  DEQUEUE_MUTEX (mutex);
	}

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
	 to not violate the mutex destruction requirements (see
	 lll_unlock).  */
      int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
		 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
	 TID is not the value of __lock anymore, in which case we let the
	 kernel take care of the situation.  Use release MO in the CAS to
	 synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
	{
	  if (((l & FUTEX_WAITERS) != 0)
	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
	    {
	      /* FUTEX_UNLOCK_PI lets the kernel hand the lock to the
		 highest-priority waiter.  */
	      INTERNAL_SYSCALL_DECL (__err);
	      INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
				__lll_private_flag (FUTEX_UNLOCK_PI, private));
	      break;
	    }
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						     &l, 0));

      /* This happens after the kernel releases the mutex but violates the
	 mutex destruction requirements; see comments in the code handling
	 PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif	/* __NR_futex.  */
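
    /* For the priority-protected (PP) kinds below, __lock packs the current
       priority ceiling in the bits covered by PTHREAD_MUTEX_PRIO_CEILING_MASK
       (starting at PTHREAD_MUTEX_PRIO_CEILING_SHIFT) and the lock state in
       the remaining low bits.  Unlocking keeps the ceiling bits and then
       calls __pthread_tpp_change_priority to undo this thread's priority
       boost.  */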
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
	return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
	 lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
	{
	  /* Keep the priority ceiling bits; clear the lock bits.  */
	  newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						     &oldval, newval));

      /* A lock value above 1 means there were waiters; wake one of them.  */
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
	lll_futex_wake (&mutex->__data.__lock, 1,
			PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)
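
/* Illustrative caller-side sketch (not part of this file's build): with an
   error-checking mutex, unlocking from a thread that does not own the lock
   is reported as EPERM instead of corrupting the lock state:

     pthread_mutex_t m;
     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &a);
     int r = pthread_mutex_unlock (&m);   // never locked: r == EPERM
*/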