1 /* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
27 internal_function attribute_hidden
28 __pthread_mutex_unlock_usercnt (mutex
, decr
)
29 pthread_mutex_t
*mutex
;
34 switch (__builtin_expect (mutex
->__data
.__kind
, PTHREAD_MUTEX_TIMED_NP
))
36 case PTHREAD_MUTEX_RECURSIVE_NP
:
37 /* Recursive mutex. */
38 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
41 if (--mutex
->__data
.__count
!= 0)
42 /* We still hold the mutex. */
46 case PTHREAD_MUTEX_ERRORCHECK_NP
:
47 /* Error checking mutex. */
48 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
49 || ! lll_mutex_islocked (mutex
->__data
.__lock
))
53 case PTHREAD_MUTEX_TIMED_NP
:
54 case PTHREAD_MUTEX_ADAPTIVE_NP
:
55 /* Always reset the owner field. */
57 mutex
->__data
.__owner
= 0;
60 --mutex
->__data
.__nusers
;
63 lll_mutex_unlock (mutex
->__data
.__lock
);
66 case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP
:
67 /* Recursive mutex. */
68 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
69 == THREAD_GETMEM (THREAD_SELF
, tid
)
70 && __builtin_expect (mutex
->__data
.__owner
71 == PTHREAD_MUTEX_INCONSISTENT
, 0))
73 if (--mutex
->__data
.__count
!= 0)
74 /* We still hold the mutex. */
75 return ENOTRECOVERABLE
;
80 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
83 if (--mutex
->__data
.__count
!= 0)
84 /* We still hold the mutex. */
89 case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP
:
90 case PTHREAD_MUTEX_ROBUST_NORMAL_NP
:
91 case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP
:
92 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
93 != THREAD_GETMEM (THREAD_SELF
, tid
)
94 || ! lll_mutex_islocked (mutex
->__data
.__lock
))
97 /* If the previous owner died and the caller did not succeed in
98 making the state consistent, mark the mutex as unrecoverable
99 and make all waiters. */
100 if (__builtin_expect (mutex
->__data
.__owner
101 == PTHREAD_MUTEX_INCONSISTENT
, 0))
103 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
106 /* Remove mutex from the list. */
107 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
108 &mutex
->__data
.__list
.__next
);
109 DEQUEUE_MUTEX (mutex
);
111 mutex
->__data
.__owner
= newowner
;
114 --mutex
->__data
.__nusers
;
117 lll_robust_mutex_unlock (mutex
->__data
.__lock
);
119 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
122 case PTHREAD_MUTEX_PI_RECURSIVE_NP
:
123 /* Recursive mutex. */
124 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
127 if (--mutex
->__data
.__count
!= 0)
128 /* We still hold the mutex. */
132 case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP
:
133 /* Recursive mutex. */
134 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
135 == THREAD_GETMEM (THREAD_SELF
, tid
)
136 && __builtin_expect (mutex
->__data
.__owner
137 == PTHREAD_MUTEX_INCONSISTENT
, 0))
139 if (--mutex
->__data
.__count
!= 0)
140 /* We still hold the mutex. */
141 return ENOTRECOVERABLE
;
143 goto pi_notrecoverable
;
146 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
149 if (--mutex
->__data
.__count
!= 0)
150 /* We still hold the mutex. */
155 case PTHREAD_MUTEX_PI_ERRORCHECK_NP
:
156 case PTHREAD_MUTEX_PI_NORMAL_NP
:
157 case PTHREAD_MUTEX_PI_ADAPTIVE_NP
:
158 case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP
:
159 case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP
:
160 case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP
:
161 if ((mutex
->__data
.__lock
& FUTEX_TID_MASK
)
162 != THREAD_GETMEM (THREAD_SELF
, tid
)
163 || ! lll_mutex_islocked (mutex
->__data
.__lock
))
166 /* If the previous owner died and the caller did not succeed in
167 making the state consistent, mark the mutex as unrecoverable
168 and make all waiters. */
169 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0
170 && __builtin_expect (mutex
->__data
.__owner
171 == PTHREAD_MUTEX_INCONSISTENT
, 0))
173 newowner
= PTHREAD_MUTEX_NOTRECOVERABLE
;
176 if ((mutex
->__data
.__kind
& PTHREAD_MUTEX_ROBUST_NORMAL_NP
) != 0)
178 /* Remove mutex from the list.
179 Note: robust PI futexes are signaled by setting bit 0. */
180 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
,
181 (void *) (((uintptr_t) &mutex
->__data
.__list
.__next
)
183 DEQUEUE_MUTEX (mutex
);
186 mutex
->__data
.__owner
= newowner
;
189 --mutex
->__data
.__nusers
;
192 if ((mutex
->__data
.__lock
& FUTEX_WAITERS
) != 0
193 || atomic_compare_and_exchange_bool_acq (&mutex
->__data
.__lock
, 0,
194 THREAD_GETMEM (THREAD_SELF
,
197 INTERNAL_SYSCALL_DECL (__err
);
198 INTERNAL_SYSCALL (futex
, __err
, 2, &mutex
->__data
.__lock
,
202 THREAD_SETMEM (THREAD_SELF
, robust_head
.list_op_pending
, NULL
);
205 case PTHREAD_MUTEX_PP_RECURSIVE_NP
:
206 /* Recursive mutex. */
207 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
))
210 if (--mutex
->__data
.__count
!= 0)
211 /* We still hold the mutex. */
215 case PTHREAD_MUTEX_PP_ERRORCHECK_NP
:
216 /* Error checking mutex. */
217 if (mutex
->__data
.__owner
!= THREAD_GETMEM (THREAD_SELF
, tid
)
218 || (mutex
->__data
.__lock
& ~ PTHREAD_MUTEX_PRIO_CEILING_MASK
) == 0)
222 case PTHREAD_MUTEX_PP_NORMAL_NP
:
223 case PTHREAD_MUTEX_PP_ADAPTIVE_NP
:
224 /* Always reset the owner field. */
226 mutex
->__data
.__owner
= 0;
230 --mutex
->__data
.__nusers
;
236 oldval
= mutex
->__data
.__lock
;
237 newval
= oldval
& PTHREAD_MUTEX_PRIO_CEILING_MASK
;
239 while (atomic_compare_and_exchange_bool_acq (&mutex
->__data
.__lock
,
242 if ((oldval
& ~PTHREAD_MUTEX_PRIO_CEILING_MASK
) > 1)
243 lll_futex_wake (&mutex
->__data
.__lock
, 1);
245 int oldprio
= newval
>> PTHREAD_MUTEX_PRIO_CEILING_SHIFT
;
246 return __pthread_tpp_change_priority (oldprio
, -1);
249 /* Correct code cannot set any other type. */
258 __pthread_mutex_unlock (mutex
)
259 pthread_mutex_t
*mutex
;
261 return __pthread_mutex_unlock_usercnt (mutex
, 1);
263 strong_alias (__pthread_mutex_unlock
, pthread_mutex_unlock
)
264 strong_alias (__pthread_mutex_unlock
, __pthread_mutex_unlock_internal
)