/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>
#include <shlib-compat.h>

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
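
/* __pthread_mutex_unlock_full handles the rarer mutex kinds (robust,
   priority inheritance, priority protection).  It is kept out of line
   so the fast path in __pthread_mutex_unlock_usercnt below stays
   small.  */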

/* lll_unlock with single-thread optimization.  */
static inline void
lll_mutex_unlock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  */
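  /* With a single thread, no waiter can be blocked on the futex, so a
     plain store of 0 releases the lock; neither an atomic operation
     nor a futex wake-up is needed.  */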
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P)
    mutex->__data.__lock = 0;
  else
    lll_unlock (mutex->__data.__lock, private);
}
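
/* Release MUTEX; if DECR is nonzero, also count one user less in
   __nusers.  The condition variable implementation calls this with
   decr == 0 because it releases and reacquires the mutex on behalf of
   the user without the user count changing.  */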
int
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
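
  /* Any type bit outside the plain kind mask and the elision flags
     marks a robust, priority-inheritance, or priority-protected mutex;
     all of those take the slow path.  */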
  if (__builtin_expect (type
			& ~(PTHREAD_MUTEX_KIND_MASK_NP
			    | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  */
      lll_mutex_unlock_optimized (mutex);

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
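      /* For elision-capable mutexes the owner and user counts are
	 never recorded at lock time: writing them would add the mutex
	 words to the transaction's write set and abort concurrent
	 elided sections.  There is therefore nothing to reset here.  */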
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
				 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      goto normal;
    }
}
libc_hidden_def (__pthread_mutex_unlock_usercnt)

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto robust;
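
    /* Robust mutex recovery protocol: when an owner dies holding the
       mutex, the next pthread_mutex_lock acquires it but returns
       EOWNERDEAD, and the mutex is marked inconsistent.  That owner is
       expected to repair the protected state and call
       pthread_mutex_consistent before unlocking; unlocking while still
       inconsistent (the PTHREAD_MUTEX_INCONSISTENT checks in this
       function) makes the mutex permanently unusable, and further lock
       attempts fail with ENOTRECOVERABLE.  */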

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
	 comments at ENQUEUE_MUTEX.  */
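      /* A compiler barrier is all that is needed here: the robust list
	 is written only by this thread and read by the kernel when this
	 thread exits, and the kernel then observes the thread's stores
	 in program order.  The asm merely keeps the compiler from
	 reordering the op_pending store past the dequeue.  */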
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
	 FUTEX_WAITERS set previously, then wake any waiters.
	 The unlock operation must be the last access to the mutex to not
	 violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
			     & FUTEX_WAITERS) != 0))
	futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
	 FIXME However, this violates the mutex destruction requirements
	 because another thread could acquire the mutex, destroy it, and
	 reuse the memory for something else; then, if this thread crashes,
	 and the memory happens to have a value equal to the TID, the kernel
	 will believe it is still related to the mutex (which has been
	 destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
	 available, pthread_mutex_init should never have allowed the type to
	 be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto pi_notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
	{
	continue_pi_robust:
	  /* Remove mutex from the list.
	     Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));
	  /* We must set op_pending before we dequeue the mutex.  Also see
	     comments at ENQUEUE_MUTEX.  */
	  __asm ("" ::: "memory");
	  DEQUEUE_MUTEX (mutex);
	}

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
	 to not violate the mutex destruction requirements (see
	 lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
	& PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
		 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
	 TID is not the value of __lock anymore, in which case we let the
	 kernel take care of the situation.  Use release MO in the CAS to
	 synchronize with acquire MO in lock acquisitions.  */
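      /* FUTEX_UNLOCK_PI hands the lock to the highest-priority waiter
	 and undoes this thread's priority-inheritance boost in the
	 kernel; the user-space CAS is valid only while the lock word is
	 exactly our TID with no flag bits set.  */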
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
	{
	  if (((l & FUTEX_WAITERS) != 0)
	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
	    {
	      futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
			       private);
	      break;
	    }
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
	 mutex destruction requirements; see comments in the code handling
	 PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
	return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
	 lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
	newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &oldval, newval));
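
      /* The lock word of a PP mutex keeps the priority ceiling in the
	 PTHREAD_MUTEX_PRIO_CEILING_MASK bits; the remaining bits act as
	 an ordinary lock word, so a pre-unlock value above 1 there
	 means threads are waiting and one must be woken.  */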
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
	futex_wake ((unsigned int *) &mutex->__data.__lock, 1,
		    PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
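
      /* The lock word itself is released at this point; all that is
	 left is to drop the priority-ceiling boost this thread got
	 when acquiring the mutex, which __pthread_tpp_change_priority
	 does when -1 is passed as the new priority.  */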
      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}

int
___pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
libc_hidden_ver (___pthread_mutex_unlock, __pthread_mutex_unlock)
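
/* Symbol plumbing: ___pthread_mutex_unlock is the one implementation;
   it is exported as pthread_mutex_unlock@GLIBC_2_0, and since the
   glibc 2.34 merge of libpthread into libc, the old internal name
   __pthread_mutex_unlock survives only as a compatibility symbol for
   binaries linked against earlier libpthread versions.  */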
#ifndef SHARED
strong_alias (___pthread_mutex_unlock, __pthread_mutex_unlock)
#endif
versioned_symbol (libpthread, ___pthread_mutex_unlock, pthread_mutex_unlock,
		  GLIBC_2_0);

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_unlock, __pthread_mutex_unlock,
	       GLIBC_2_0);
#endif