1 /* Copyright (C) 2002-2012 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <lowlevellock.h>
#include <stap-probe.h>
28 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
29 __attribute_noinline__
;
32 internal_function attribute_hidden
33 __pthread_mutex_unlock_usercnt (mutex
, decr
)
34 pthread_mutex_t
*mutex
;
37 int type
= PTHREAD_MUTEX_TYPE (mutex
);
38 if (__builtin_expect (type
& ~PTHREAD_MUTEX_KIND_MASK_NP
, 0))
39 return __pthread_mutex_unlock_full (mutex
, decr
);
41 if (__builtin_expect (type
, PTHREAD_MUTEX_TIMED_NP
)
42 == PTHREAD_MUTEX_TIMED_NP
)
44 /* Always reset the owner field. */
46 mutex
->__data
.__owner
= 0;
49 --mutex
->__data
.__nusers
;
52 lll_unlock (mutex
->__data
.__lock
, PTHREAD_MUTEX_PSHARED (mutex
));
54 LIBC_PROBE (mutex_release
, 1, mutex
);
58 else if (__builtin_expect (type
== PTHREAD_MUTEX_RECURSIVE_NP
, 1))
60 /* Recursive mutex. */
61 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
64 if (--mutex
->__data
.__count
!= 0)
65 /* We still hold the mutex. */
69 else if (__builtin_expect (type
== PTHREAD_MUTEX_ADAPTIVE_NP
, 1))
73 /* Error checking mutex. */
74 assert (type
== PTHREAD_MUTEX_ERRORCHECK_NP
);
75 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
76 || ! lll_islocked (mutex
->__data
.__lock
))
85 __pthread_mutex_unlock_full (pthread_mutex_t
*mutex
, int decr
)
89 switch (PTHREAD_MUTEX_TYPE (mutex
))
91 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
:
92 /* Recursive mutex. */
93 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
94 == THREAD_GETMEM (THREAD_SELF
, tid
)
95 && __builtin_expect (mutex
->__data
.__owner
96 == PTHREAD_MUTEX_INCONSISTENT
, 0))
98 if (--mutex
->__data
.__count
!= 0)
99 /* We still hold the mutex. */
100 return ENOTRECOVERABLE
;
105 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
108 if (--mutex
->__data
.__count
!= 0)
109 /* We still hold the mutex. */
114 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
:
115 case PTHREAD_MUTEX_ROBUST_NORMAL_NP
:
116 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
:
117 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
118 != THREAD_GETMEM (THREAD_SELF
, tid
)
119 || ! lll_islocked (mutex
->__data
.__lock
))
122 /* If the previous owner died and the caller did not succeed in
123 making the state consistent, mark the mutex as unrecoverable
124 and make all waiters. */
125 if (__builtin_expect (mutex
->__data
.__owner
126 == PTHREAD_MUTEX_INCONSISTENT
, 0))
128 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
131 /* Remove mutex from the list. */
132 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
133 &mutex
->__data
.__list
.__next
);
134 DEQUEUE_MUTEX (mutex
);
136 mutex
->__data
.__owner
= newowner
;
139 --mutex
->__data
.__nusers
;
142 lll_robust_unlock (mutex
->__data
.__lock
,
143 PTHREAD_ROBUST_MUTEX_PSHARED (mutex
));
145 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
148 case PTHREAD_MUTEX_PI_RECURSIVE_NP
:
149 /* Recursive mutex. */
150 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
153 if (--mutex
->__data
.__count
!= 0)
154 /* We still hold the mutex. */
156 goto continue_pi_non_robust
;
158 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
:
159 /* Recursive mutex. */
160 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
161 == THREAD_GETMEM (THREAD_SELF
, tid
)
162 && __builtin_expect (mutex
->__data
.__owner
163 == PTHREAD_MUTEX_INCONSISTENT
, 0))
165 if (--mutex
->__data
.__count
!= 0)
166 /* We still hold the mutex. */
167 return ENOTRECOVERABLE
;
169 goto pi_notrecoverable
;
172 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
175 if (--mutex
->__data
.__count
!= 0)
176 /* We still hold the mutex. */
179 goto continue_pi_robust
;
181 case PTHREAD_MUTEX_PI_ERRORCHECK_NP
:
182 case PTHREAD_MUTEX_PI_NORMAL_NP
:
183 case PTHREAD_MUTEX_PI_ADAPTIVE_NP
:
184 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
:
185 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
:
186 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
:
187 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
188 != THREAD_GETMEM (THREAD_SELF
, tid
)
189 || ! lll_islocked (mutex
->__data
.__lock
))
192 /* If the previous owner died and the caller did not succeed in
193 making the state consistent, mark the mutex as unrecoverable
194 and make all waiters. */
195 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0
196 && __builtin_expect (mutex
->__data
.__owner
197 == PTHREAD_MUTEX_INCONSISTENT
, 0))
199 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
201 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0)
204 /* Remove mutex from the list.
205 Note: robust PI futexes are signaled by setting bit 0. */
206 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
207 (void *) (((uintptr_t) &mutex
->__data
.__list
.__next
)
209 DEQUEUE_MUTEX (mutex
);
212 continue_pi_non_robust
:
213 mutex
->__data
.__owner
= newowner
;
216 --mutex
->__data
.__nusers
;
219 if ((mutex
->__data
.__lock
& FUTEX_WAITERS
) != 0
220 || atomic_compare_and_exchange_bool_rel (&mutex
->__data
.__lock
, 0,
221 THREAD_GETMEM (THREAD_SELF
,
224 int robust
= mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
;
225 int private = (robust
226 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex
)
227 : PTHREAD_MUTEX_PSHARED (mutex
));
228 INTERNAL_SYSCALL_DECL (__err
);
229 INTERNAL_SYSCALL (futex
, __err
, 2, &mutex
->__data
.__lock
,
230 __lll_private_flag (FUTEX_UNLOCK_PI
, private));
233 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
236 case PTHREAD_MUTEX_PP_RECURSIVE_NP
:
237 /* Recursive mutex. */
238 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
241 if (--mutex
->__data
.__count
!= 0)
242 /* We still hold the mutex. */
246 case PTHREAD_MUTEX_PP_ERRORCHECK_NP
:
247 /* Error checking mutex. */
248 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
249 || (mutex
->__data
.__lock
& ~ PTHREAD_MUTEX_PRIO_CEILING_MASK
) == 0)
253 case PTHREAD_MUTEX_PP_NORMAL_NP
:
254 case PTHREAD_MUTEX_PP_ADAPTIVE_NP
:
255 /* Always reset the owner field. */
257 mutex
->__data
.__owner
= 0;
261 --mutex
->__data
.__nusers
;
267 oldval
= mutex
->__data
.__lock
;
268 newval
= oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
;
270 while (atomic_compare_and_exchange_bool_rel (&mutex
->__data
.__lock
,
273 if ((oldval
& ~PTHREAD_MUTEX_PRIO_CEILING_MASK
) > 1)
274 lll_futex_wake (&mutex
->__data
.__lock
, 1,
275 PTHREAD_MUTEX_PSHARED (mutex
));
277 int oldprio
= newval
>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
279 LIBC_PROBE (mutex_release
, 1, mutex
);
281 return __pthread_tpp_change_priority (oldprio
, -1);
284 /* Correct code cannot set any other type. */
288 LIBC_PROBE (mutex_release
, 1, mutex
);
294 __pthread_mutex_unlock (mutex
)
295 pthread_mutex_t
*mutex
;
297 return __pthread_mutex_unlock_usercnt (mutex
, 1);
299 strong_alias (__pthread_mutex_unlock
, pthread_mutex_unlock
)
300 hidden_def (__pthread_mutex_unlock
)