[glibc.git] / nptl / pthread_mutex_unlock.c

/* Copyright (C) 2002-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>

static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
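
/* Unlock MUTEX, decrementing the user count only when DECR is nonzero.
   Internal callers such as pthread_cond_wait pass DECR == 0 so that
   __nusers is left untouched while the mutex is temporarily released.
   Only the plain timed, recursive, adaptive and error-checking kinds
   are handled here; robust, priority-inheritance and priority-protected
   mutexes take the slow path in __pthread_mutex_unlock_full.  */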
int
internal_function attribute_hidden
__pthread_mutex_unlock_usercnt (mutex, decr)
     pthread_mutex_t *mutex;
     int decr;
{
  int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
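

/* Slow path of the unlock operation: handles the robust, priority
   inheritance (PI) and priority protection (PP) mutex kinds, which
   need extra bookkeeping (robust-list maintenance, kernel ownership
   hand-off, priority ceiling restoration) beyond a plain futex wake.  */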
static int
internal_function
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
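
      /* list_op_pending is set before the dequeue and the futex release
         so the kernel can still find this mutex if the thread is killed
         between the two steps.  */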
    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_robust_unlock (mutex->__data.__lock,
                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
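
      /* Priority-inheritance mutexes: ownership is encoded in the futex
         word itself (the TID plus FUTEX_WAITERS), so unlocking may have
         to hand the lock over in the kernel via FUTEX_UNLOCK_PI.  */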
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;
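
      /* With no waiters the futex word just holds our TID, so clearing
         it with a release CAS is enough; once FUTEX_WAITERS is set (or
         the CAS loses a race) the kernel must pass ownership on with
         FUTEX_UNLOCK_PI.  */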
      /* Unlock.  */
      if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
          || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
                                                   THREAD_GETMEM (THREAD_SELF,
                                                                  tid)))
        {
          int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
          int private = (robust
                         ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                         : PTHREAD_MUTEX_PSHARED (mutex));
          INTERNAL_SYSCALL_DECL (__err);
          INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
                            __lll_private_flag (FUTEX_UNLOCK_PI, private));
        }

      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
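
      /* Priority-protection (ceiling) mutexes: the current ceiling is
         kept in the PTHREAD_MUTEX_PRIO_CEILING_MASK bits of the futex
         word.  Unlocking clears the lock state without touching the
         ceiling and then gives up the ceiling-boosted priority via
         __pthread_tpp_change_priority.  */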
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;
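
      /* Retry the release CAS until it succeeds, keeping only the
         ceiling bits in the futex word; if the non-ceiling part of the
         old value shows contention, wake one waiter.  */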
      /* Unlock.  */
      int newval, oldval;
      do
        {
          oldval = mutex->__data.__lock;
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
                                                   newval, oldval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        lll_futex_wake (&mutex->__data.__lock, 1,
                        PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}
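

/* The public entry point always drops one user reference (DECR == 1).  */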
int
__pthread_mutex_unlock (mutex)
     pthread_mutex_t *mutex;
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)