/* Copyright (C) 2002, 2003, 2005, 2006, 2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include "pthreadP.h"
#include <lowlevellock.h>
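
/* Try to lock MUTEX without blocking.  Returns 0 on success, EBUSY if
   the mutex is already locked, or another error code depending on the
   mutex kind (EAGAIN, EDEADLK, EOWNERDEAD, ENOTRECOVERABLE, EINVAL).  */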
int
__pthread_mutex_trylock (mutex)
     pthread_mutex_t *mutex;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
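
  /* For the robust and PI kinds the thread ID is also the value stored
     in the futex word to mark ownership; for the plain kinds it is
     only recorded in __owner.  */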
  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }
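
      /* Not recursively held by us: try the low-level lock, which
         returns zero iff it was acquired.  */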
      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
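
      /* Robust mutexes: the owner's TID is kept in the futex word so
         the kernel can set FUTEX_OWNER_DIED there if the owner dies.  */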
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
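      /* Record in list_op_pending that we are about to acquire this
         mutex, so the cleanup code can find it if this thread dies
         before it is enqueued on the robust list.  */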
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
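
      /* Acquired: put the mutex on this thread's robust list and clear
         the pending marker before publishing ownership.  */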
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
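
        /* The PI kinds encode the base mutex kind plus an optional
           robust bit; split them apart so the checks below can be
           shared.  */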
        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_TRYLOCK_PI, 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
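            /* The previous owner died; clear the owner-died bit.  The
               caller still gets EOWNERDEAD and must mark the mutex
               consistent before normal use.  */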
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
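
      /* Priority-protected (PRIO_PROTECT) mutexes: the futex word holds
         the current priority ceiling plus lock bits, not the owner's
         TID; ownership is tracked only in __owner.  */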
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
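
        /* Raise this thread to the mutex's ceiling priority before
           trying the lock, retrying if the stored ceiling changes
           underneath us.  */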
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)