/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <lowlevellock.h>
#include <stap-probe.h>
/* Fallback when the platform provides no lock-elision support: perform a
   plain low-level unlock and yield 0 ("success").  */
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
31 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
32 __attribute_noinline__
;
36 __pthread_mutex_unlock_usercnt (pthread_mutex_t
*mutex
, int decr
)
38 int type
= PTHREAD_MUTEX_TYPE_ELISION (mutex
);
39 if (__builtin_expect (type
&
40 ~(PTHREAD_MUTEX_KIND_MASK_NP
|PTHREAD_MUTEX_ELISION_FLAGS_NP
), 0))
41 return __pthread_mutex_unlock_full (mutex
, decr
);
43 if (__builtin_expect (type
, PTHREAD_MUTEX_TIMED_NP
)
44 == PTHREAD_MUTEX_TIMED_NP
)
46 /* Always reset the owner field. */
48 mutex
->__data
.__owner
= 0;
51 --mutex
->__data
.__nusers
;
54 lll_unlock (mutex
->__data
.__lock
, PTHREAD_MUTEX_PSHARED (mutex
));
56 LIBC_PROBE (mutex_release
, 1, mutex
);
60 else if (__glibc_likely (type
== PTHREAD_MUTEX_TIMED_ELISION_NP
))
62 /* Don't reset the owner/users fields for elision. */
63 return lll_unlock_elision (mutex
->__data
.__lock
, mutex
->__data
.__elision
,
64 PTHREAD_MUTEX_PSHARED (mutex
));
66 else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex
)
67 == PTHREAD_MUTEX_RECURSIVE_NP
, 1))
69 /* Recursive mutex. */
70 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
73 if (--mutex
->__data
.__count
!= 0)
74 /* We still hold the mutex. */
78 else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex
)
79 == PTHREAD_MUTEX_ADAPTIVE_NP
, 1))
83 /* Error checking mutex. */
84 assert (type
== PTHREAD_MUTEX_ERRORCHECK_NP
);
85 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
86 || ! lll_islocked (mutex
->__data
.__lock
))
94 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
99 switch (PTHREAD_MUTEX_TYPE (mutex
))
101 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
:
102 /* Recursive mutex. */
103 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
104 == THREAD_GETMEM (THREAD_SELF
, tid
)
105 && __builtin_expect (mutex
->__data
.__owner
106 == PTHREAD_MUTEX_INCONSISTENT
, 0))
108 if (--mutex
->__data
.__count
!= 0)
109 /* We still hold the mutex. */
110 return ENOTRECOVERABLE
;
115 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
118 if (--mutex
->__data
.__count
!= 0)
119 /* We still hold the mutex. */
124 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
:
125 case PTHREAD_MUTEX_ROBUST_NORMAL_NP
:
126 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
:
127 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
128 != THREAD_GETMEM (THREAD_SELF
, tid
)
129 || ! lll_islocked (mutex
->__data
.__lock
))
132 /* If the previous owner died and the caller did not succeed in
133 making the state consistent, mark the mutex as unrecoverable
134 and make all waiters. */
135 if (__builtin_expect (mutex
->__data
.__owner
136 == PTHREAD_MUTEX_INCONSISTENT
, 0))
138 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
141 /* Remove mutex from the list. */
142 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
143 &mutex
->__data
.__list
.__next
);
144 /* We must set op_pending before we dequeue the mutex. Also see
145 comments at ENQUEUE_MUTEX. */
146 __asm ("" ::: "memory");
147 DEQUEUE_MUTEX (mutex
);
149 mutex
->__data
.__owner
= newowner
;
152 --mutex
->__data
.__nusers
;
154 /* Unlock by setting the lock to 0 (not acquired); if the lock had
155 FUTEX_WAITERS set previously, then wake any waiters.
156 The unlock operation must be the last access to the mutex to not
157 violate the mutex destruction requirements (see __lll_unlock). */
158 private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex
);
159 if (__glibc_unlikely ((atomic_exchange_rel (&mutex
->__data
.__lock
, 0)
160 & FUTEX_WAITERS
) != 0))
161 lll_futex_wake (&mutex
->__data
.__lock
, 1, private);
163 /* We must clear op_pending after we release the mutex.
164 FIXME However, this violates the mutex destruction requirements
165 because another thread could acquire the mutex, destroy it, and
166 reuse the memory for something else; then, if this thread crashes,
167 and the memory happens to have a value equal to the TID, the kernel
168 will believe it is still related to the mutex (which has been
169 destroyed already) and will modify some other random object. */
170 __asm ("" ::: "memory");
171 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
174 /* The PI support requires the Linux futex system call. If that's not
175 available, pthread_mutex_init should never have allowed the type to
176 be set. So it will get the default case for an invalid type. */
178 case PTHREAD_MUTEX_PI_RECURSIVE_NP
:
179 /* Recursive mutex. */
180 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
183 if (--mutex
->__data
.__count
!= 0)
184 /* We still hold the mutex. */
186 goto continue_pi_non_robust
;
188 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
:
189 /* Recursive mutex. */
190 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
191 == THREAD_GETMEM (THREAD_SELF
, tid
)
192 && __builtin_expect (mutex
->__data
.__owner
193 == PTHREAD_MUTEX_INCONSISTENT
, 0))
195 if (--mutex
->__data
.__count
!= 0)
196 /* We still hold the mutex. */
197 return ENOTRECOVERABLE
;
199 goto pi_notrecoverable
;
202 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
205 if (--mutex
->__data
.__count
!= 0)
206 /* We still hold the mutex. */
209 goto continue_pi_robust
;
211 case PTHREAD_MUTEX_PI_ERRORCHECK_NP
:
212 case PTHREAD_MUTEX_PI_NORMAL_NP
:
213 case PTHREAD_MUTEX_PI_ADAPTIVE_NP
:
214 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
:
215 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
:
216 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
:
217 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
218 != THREAD_GETMEM (THREAD_SELF
, tid
)
219 || ! lll_islocked (mutex
->__data
.__lock
))
222 /* If the previous owner died and the caller did not succeed in
223 making the state consistent, mark the mutex as unrecoverable
224 and make all waiters. */
225 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0
226 && __builtin_expect (mutex
->__data
.__owner
227 == PTHREAD_MUTEX_INCONSISTENT
, 0))
229 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
231 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0)
234 /* Remove mutex from the list.
235 Note: robust PI futexes are signaled by setting bit 0. */
236 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
237 (void *) (((uintptr_t) &mutex
->__data
.__list
.__next
)
239 /* We must set op_pending before we dequeue the mutex. Also see
240 comments at ENQUEUE_MUTEX. */
241 __asm ("" ::: "memory");
242 DEQUEUE_MUTEX (mutex
);
245 continue_pi_non_robust
:
246 mutex
->__data
.__owner
= newowner
;
249 --mutex
->__data
.__nusers
;
251 /* Unlock. Load all necessary mutex data before releasing the mutex
252 to not violate the mutex destruction requirements (see
254 int robust
= mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
;
256 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex
)
257 : PTHREAD_MUTEX_PSHARED (mutex
));
258 /* Unlock the mutex using a CAS unless there are futex waiters or our
259 TID is not the value of __lock anymore, in which case we let the
260 kernel take care of the situation. Use release MO in the CAS to
261 synchronize with acquire MO in lock acquisitions. */
262 int l
= atomic_load_relaxed (&mutex
->__data
.__lock
);
265 if (((l
& FUTEX_WAITERS
) != 0)
266 || (l
!= THREAD_GETMEM (THREAD_SELF
, tid
)))
268 INTERNAL_SYSCALL_DECL (__err
);
269 INTERNAL_SYSCALL (futex
, __err
, 2, &mutex
->__data
.__lock
,
270 __lll_private_flag (FUTEX_UNLOCK_PI
, private));
274 while (!atomic_compare_exchange_weak_release (&mutex
->__data
.__lock
,
277 /* This happens after the kernel releases the mutex but violates the
278 mutex destruction requirements; see comments in the code handling
279 PTHREAD_MUTEX_ROBUST_NORMAL_NP. */
280 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
282 #endif /* __NR_futex. */
284 case PTHREAD_MUTEX_PP_RECURSIVE_NP
:
285 /* Recursive mutex. */
286 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
289 if (--mutex
->__data
.__count
!= 0)
290 /* We still hold the mutex. */
294 case PTHREAD_MUTEX_PP_ERRORCHECK_NP
:
295 /* Error checking mutex. */
296 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
297 || (mutex
->__data
.__lock
& ~ PTHREAD_MUTEX_PRIO_CEILING_MASK
) == 0)
301 case PTHREAD_MUTEX_PP_NORMAL_NP
:
302 case PTHREAD_MUTEX_PP_ADAPTIVE_NP
:
303 /* Always reset the owner field. */
305 mutex
->__data
.__owner
= 0;
309 --mutex
->__data
.__nusers
;
311 /* Unlock. Use release MO in the CAS to synchronize with acquire MO in
312 lock acquisitions. */
314 int oldval
= atomic_load_relaxed (&mutex
->__data
.__lock
);
317 newval
= oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
;
319 while (!atomic_compare_exchange_weak_release (&mutex
->__data
.__lock
,
322 if ((oldval
& ~PTHREAD_MUTEX_PRIO_CEILING_MASK
) > 1)
323 lll_futex_wake (&mutex
->__data
.__lock
, 1,
324 PTHREAD_MUTEX_PSHARED (mutex
));
326 int oldprio
= newval
>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
328 LIBC_PROBE (mutex_release
, 1, mutex
);
330 return __pthread_tpp_change_priority (oldprio
, -1);
333 /* Correct code cannot set any other type. */
337 LIBC_PROBE (mutex_release
, 1, mutex
);
343 __pthread_mutex_unlock (pthread_mutex_t
*mutex
)
345 return __pthread_mutex_unlock_usercnt (mutex
, 1);
347 weak_alias (__pthread_mutex_unlock
, pthread_mutex_unlock
)
348 hidden_def (__pthread_mutex_unlock
)