/* nptl/pthread_mutex_unlock.c  */
/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>
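/* If the platform does not provide lock elision, unlocking an "elided"
   mutex degenerates to a plain low-level unlock (the 0 result is the
   success return value).  */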
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
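/* __pthread_mutex_unlock_full handles the robust, priority-inheritance
   (PI), and priority-protection (PP) variants; it is kept out of line so
   the common-case code in __pthread_mutex_unlock_usercnt stays small.  */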
int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
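  /* DECR is zero when the caller keeps its own reference on the mutex
     alive (the condition variable code unlocks this way), so the user
     count must not be decremented.  */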
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
			& ~(PTHREAD_MUTEX_KIND_MASK_NP
			    | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);
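  /* Note that __builtin_expect returns its first argument, so the
     expression below still compares TYPE against PTHREAD_MUTEX_TIMED_NP;
     the second argument merely hints that plain timed mutexes are the
     common case.  */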
  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
				 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      goto normal;
    }
}
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
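    /* For robust (and PI) mutexes the kernel-visible futex word in __lock
       carries the owner's TID in its low bits together with flag bits
       such as FUTEX_WAITERS and FUTEX_OWNER_DIED, which is why ownership
       tests below mask with FUTEX_TID_MASK.  */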
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto robust;
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
	 comments at ENQUEUE_MUTEX.  */
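      /* The empty asm is a compiler barrier only; the robust list is read
	 by the kernel in this thread's context (on thread death), so
	 compiler-level ordering suffices.  */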
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);
      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;
      /* Unlock by setting the lock to 0 (not acquired); if the lock had
	 FUTEX_WAITERS set previously, then wake any waiters.
	 The unlock operation must be the last access to the mutex to not
	 violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
			     & FUTEX_WAITERS) != 0))
	futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);
      /* We must clear op_pending after we release the mutex.
	 FIXME However, this violates the mutex destruction requirements
	 because another thread could acquire the mutex, destroy it, and
	 reuse the memory for something else; then, if this thread crashes,
	 and the memory happens to have a value equal to the TID, the kernel
	 will believe it is still related to the mutex (which has been
	 destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto continue_pi_non_robust;
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto pi_notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto continue_pi_robust;
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
	{
	continue_pi_robust:
	  /* Remove mutex from the list.
	     Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));
	  /* We must set op_pending before we dequeue the mutex.  Also see
	     comments at ENQUEUE_MUTEX.  */
	  __asm ("" ::: "memory");
	  DEQUEUE_MUTEX (mutex);
	}
    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;
      /* Unlock.  Load all necessary mutex data before releasing the mutex
	 to not violate the mutex destruction requirements (see
	 lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
		   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
		 : PTHREAD_MUTEX_PSHARED (mutex));
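      /* PRIVATE selects the futex namespace for any wake-up; process-private
	 futexes let the kernel avoid the shared-mapping lookup.  */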
      /* Unlock the mutex using a CAS unless there are futex waiters or our
	 TID is not the value of __lock anymore, in which case we let the
	 kernel take care of the situation.  Use release MO in the CAS to
	 synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
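      /* The weak CAS may fail spuriously, but it runs inside a retry loop,
	 and on failure it reloads the current lock value into L.  */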
      do
	{
	  if (((l & FUTEX_WAITERS) != 0)
	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
	    {
	      futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
			       private);
	      break;
	    }
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &l, 0));
      /* This happens after the kernel releases the mutex but violates the
	 mutex destruction requirements; see comments in the code handling
	 PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif /* __NR_futex.  */
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto pp;
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
	return EPERM;
      /* FALLTHROUGH */
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;
      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
	 lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
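      /* The new value keeps only the priority-ceiling bits, so the stored
	 ceiling survives the unlock while the lock state is cleared.  */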
      do
	newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &oldval, newval));
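      /* A pre-unlock lock state above 1 (ignoring the ceiling bits) means
	 the mutex was contended, so wake one waiter.  */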
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
	futex_wake ((unsigned int *) &mutex->__data.__lock, 1,
		    PTHREAD_MUTEX_PSHARED (mutex));
      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }
  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}
int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)