nptl/pthread_mutex_unlock.c
/* Copyright (C) 2002-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>
#include <shlib-compat.h>
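
/* Two unlock paths are provided: __pthread_mutex_unlock_usercnt below
   handles the common kinds (normal/timed, recursive, adaptive and
   error-checking mutexes, plus their elided variants) inline, while
   __pthread_mutex_unlock_full handles the robust, priority-inheritance
   (PI) and priority-protected (PP) kinds.  */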

static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

/* lll_unlock with single-thread optimization.  */
static inline void
lll_mutex_unlock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P)
    mutex->__data.__lock = 0;
  else
    lll_unlock (mutex->__data.__lock, private);
}
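
/* Unlock MUTEX, and if DECR is nonzero also drop one reference from
   __nusers.  pthread_mutex_unlock passes 1; internal callers that must
   keep the user count unchanged, such as the condition variable code,
   pass 0.  */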
int
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
                        & ~(PTHREAD_MUTEX_KIND_MASK_NP
                            | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);
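
  /* __builtin_expect with a non-constant first argument is used as a
     value hint: the expression evaluates to TYPE, but the compiler is
     told that PTHREAD_MUTEX_TIMED_NP (a normal mutex) is the likely
     value, so the branch below is laid out as the straight-line case.  */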
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_mutex_unlock_optimized (mutex);

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
libc_hidden_def (__pthread_mutex_unlock_usercnt)
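
/* Slow path: unlock the robust, PI and PP mutex kinds, which need more
   than resetting the owner and releasing the lock word.  Robust mutexes
   must keep the per-thread robust list consistent across the unlock, PI
   mutexes may need the kernel to hand ownership to a waiter, and PP
   mutexes must restore the priority-ceiling bookkeeping.  */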
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;
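
      /* For robust mutexes the owner's TID lives in the futex word itself
         (the FUTEX_TID_MASK bits) so that the kernel can identify and
         flag a dead owner; ownership checks therefore inspect __lock and
         not just the __owner field.  */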
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_release (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
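
      /* Consequence of the INCONSISTENT checks above: if the caller took
         this mutex over from a dead owner (EOWNERDEAD) and unlocks it
         without first calling pthread_mutex_consistent, the mutex is
         marked PTHREAD_MUTEX_NOTRECOVERABLE and later lock attempts fail
         with ENOTRECOVERABLE.  */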

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                               private);
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));
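
      /* If the futex word still held our TID with FUTEX_WAITERS clear,
         the release CAS above was sufficient.  Otherwise FUTEX_UNLOCK_PI
         makes the kernel wake the highest-priority waiter and transfer
         ownership of the futex to it.  */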

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */
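
      /* For priority-protected mutexes the lock word is split: the bits
         in PTHREAD_MUTEX_PRIO_CEILING_MASK hold the current priority
         ceiling, and the remaining bits hold the lock state (0 means not
         acquired, 1 acquired, larger values acquired with waiters).  */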
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));
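
      /* The CAS loop above cleared the lock-state bits while preserving
         the ceiling bits, so the mutex is now free but keeps its
         configured ceiling.  A previous state above 1 means a waiter is
         registered and must be woken below.  */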
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1,
                    PTHREAD_MUTEX_PSHARED (mutex));
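
      /* Leaving a priority-protected mutex may lower this thread's
         priority again: OLDPRIO below is the ceiling that was in effect,
         and the -1 argument tells __pthread_tpp_change_priority that no
         new ceiling is being entered.  */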

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}

int
___pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
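
/* ___pthread_mutex_unlock is the implementation name.  It is exported to
   applications as pthread_mutex_unlock at version GLIBC_2_0, reached
   internally as __pthread_mutex_unlock via libc_hidden_ver (or via the
   strong alias in static builds), and kept available under the old
   libpthread __pthread_mutex_unlock name for binaries linked before
   GLIBC_2_34.  */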
libc_hidden_ver (___pthread_mutex_unlock, __pthread_mutex_unlock)
#ifndef SHARED
strong_alias (___pthread_mutex_unlock, __pthread_mutex_unlock)
#endif
versioned_symbol (libpthread, ___pthread_mutex_unlock, pthread_mutex_unlock,
                  GLIBC_2_0);

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_unlock, __pthread_mutex_unlock,
               GLIBC_2_0);
#endif