Use correct register for fourth parameter of x86-64 strncasecmp_l.
[glibc.git] / nptl / pthread_mutex_unlock.c
blobf9fe10b0f2b6f880de046df476082ecb9ffe3adf
/* Copyright (C) 2002, 2003, 2005-2008, 2009 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
20 #include <assert.h>
21 #include <errno.h>
22 #include <stdlib.h>
23 #include "pthreadP.h"
24 #include <lowlevellock.h>
26 static int
27 internal_function
28 __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
29 __attribute_noinline__;
31 int
32 internal_function attribute_hidden
33 __pthread_mutex_unlock_usercnt (mutex, decr)
34 pthread_mutex_t *mutex;
35 int decr;
37 int type = PTHREAD_MUTEX_TYPE (mutex);
38 if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
39 return __pthread_mutex_unlock_full (mutex, decr);
41 if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
42 == PTHREAD_MUTEX_TIMED_NP)
44 /* Always reset the owner field. */
45 normal:
46 mutex->__data.__owner = 0;
47 if (decr)
48 /* One less user. */
49 --mutex->__data.__nusers;
51 /* Unlock. */
52 lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));
53 return 0;
55 else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
57 /* Recursive mutex. */
58 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
59 return EPERM;
61 if (--mutex->__data.__count != 0)
62 /* We still hold the mutex. */
63 return 0;
64 goto normal;
66 else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
67 goto normal;
68 else
70 /* Error checking mutex. */
71 assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
72 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
73 || ! lll_islocked (mutex->__data.__lock))
74 return EPERM;
75 goto normal;
80 static int
81 internal_function
82 __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
84 int newowner = 0;
86 switch (PTHREAD_MUTEX_TYPE (mutex))
88 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
89 /* Recursive mutex. */
90 if ((mutex->__data.__lock & FUTEX_TID_MASK)
91 == THREAD_GETMEM (THREAD_SELF, tid)
92 && __builtin_expect (mutex->__data.__owner
93 == PTHREAD_MUTEX_INCONSISTENT, 0))
95 if (--mutex->__data.__count != 0)
96 /* We still hold the mutex. */
97 return ENOTRECOVERABLE;
99 goto notrecoverable;
102 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
103 return EPERM;
105 if (--mutex->__data.__count != 0)
106 /* We still hold the mutex. */
107 return 0;
109 goto robust;
111 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
112 case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
113 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
114 if ((mutex->__data.__lock & FUTEX_TID_MASK)
115 != THREAD_GETMEM (THREAD_SELF, tid)
116 || ! lll_islocked (mutex->__data.__lock))
117 return EPERM;
119 /* If the previous owner died and the caller did not succeed in
120 making the state consistent, mark the mutex as unrecoverable
121 and make all waiters. */
122 if (__builtin_expect (mutex->__data.__owner
123 == PTHREAD_MUTEX_INCONSISTENT, 0))
124 notrecoverable:
125 newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
127 robust:
128 /* Remove mutex from the list. */
129 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
130 &mutex->__data.__list.__next);
131 DEQUEUE_MUTEX (mutex);
133 mutex->__data.__owner = newowner;
134 if (decr)
135 /* One less user. */
136 --mutex->__data.__nusers;
138 /* Unlock. */
139 lll_robust_unlock (mutex->__data.__lock,
140 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
142 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
143 break;
145 case PTHREAD_MUTEX_PI_RECURSIVE_NP:
146 /* Recursive mutex. */
147 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
148 return EPERM;
150 if (--mutex->__data.__count != 0)
151 /* We still hold the mutex. */
152 return 0;
153 goto continue_pi_non_robust;
155 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
156 /* Recursive mutex. */
157 if ((mutex->__data.__lock & FUTEX_TID_MASK)
158 == THREAD_GETMEM (THREAD_SELF, tid)
159 && __builtin_expect (mutex->__data.__owner
160 == PTHREAD_MUTEX_INCONSISTENT, 0))
162 if (--mutex->__data.__count != 0)
163 /* We still hold the mutex. */
164 return ENOTRECOVERABLE;
166 goto pi_notrecoverable;
169 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
170 return EPERM;
172 if (--mutex->__data.__count != 0)
173 /* We still hold the mutex. */
174 return 0;
176 goto continue_pi_robust;
178 case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
179 case PTHREAD_MUTEX_PI_NORMAL_NP:
180 case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
181 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
182 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
183 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
184 if ((mutex->__data.__lock & FUTEX_TID_MASK)
185 != THREAD_GETMEM (THREAD_SELF, tid)
186 || ! lll_islocked (mutex->__data.__lock))
187 return EPERM;
189 /* If the previous owner died and the caller did not succeed in
190 making the state consistent, mark the mutex as unrecoverable
191 and make all waiters. */
192 if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
193 && __builtin_expect (mutex->__data.__owner
194 == PTHREAD_MUTEX_INCONSISTENT, 0))
195 pi_notrecoverable:
196 newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
198 if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
200 continue_pi_robust:
201 /* Remove mutex from the list.
202 Note: robust PI futexes are signaled by setting bit 0. */
203 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
204 (void *) (((uintptr_t) &mutex->__data.__list.__next)
205 | 1));
206 DEQUEUE_MUTEX (mutex);
209 continue_pi_non_robust:
210 mutex->__data.__owner = newowner;
211 if (decr)
212 /* One less user. */
213 --mutex->__data.__nusers;
215 /* Unlock. */
216 if ((mutex->__data.__lock & FUTEX_WAITERS) != 0
217 || atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock, 0,
218 THREAD_GETMEM (THREAD_SELF,
219 tid)))
221 int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
222 int private = (robust
223 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
224 : PTHREAD_MUTEX_PSHARED (mutex));
225 INTERNAL_SYSCALL_DECL (__err);
226 INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
227 __lll_private_flag (FUTEX_UNLOCK_PI, private));
230 THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
231 break;
233 case PTHREAD_MUTEX_PP_RECURSIVE_NP:
234 /* Recursive mutex. */
235 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
236 return EPERM;
238 if (--mutex->__data.__count != 0)
239 /* We still hold the mutex. */
240 return 0;
241 goto pp;
243 case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
244 /* Error checking mutex. */
245 if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
246 || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
247 return EPERM;
248 /* FALLTHROUGH */
250 case PTHREAD_MUTEX_PP_NORMAL_NP:
251 case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
252 /* Always reset the owner field. */
254 mutex->__data.__owner = 0;
256 if (decr)
257 /* One less user. */
258 --mutex->__data.__nusers;
260 /* Unlock. */
261 int newval, oldval;
264 oldval = mutex->__data.__lock;
265 newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
267 while (atomic_compare_and_exchange_bool_rel (&mutex->__data.__lock,
268 newval, oldval));
270 if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
271 lll_futex_wake (&mutex->__data.__lock, 1,
272 PTHREAD_MUTEX_PSHARED (mutex));
274 int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
275 return __pthread_tpp_change_priority (oldprio, -1);
277 default:
278 /* Correct code cannot set any other type. */
279 return EINVAL;
282 return 0;
287 __pthread_mutex_unlock (mutex)
288 pthread_mutex_t *mutex;
290 return __pthread_mutex_unlock_usercnt (mutex, 1);
292 strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
293 strong_alias (__pthread_mutex_unlock, __pthread_mutex_unlock_internal)