/* Copyright (C) 2002, 2003, 2005-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>


int
__pthread_mutex_trylock (mutex)
     pthread_mutex_t *mutex;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
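
  /* Dispatch on the mutex type.  The __builtin_expect hint marks the
     plain timed mutex as the common case.  */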
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;
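
      /* Robust mutexes.  The thread enrolls the mutex in its robust
         list so that, should it die while holding the lock, the kernel
         marks the lock word with FUTEX_OWNER_DIED and the next locker
         can recover.  */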
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
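
      /* From here on the kernel and any post-crash cleanup can see that
         an operation on this mutex is in flight; every exit path below
         resets list_op_pending before returning.  */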
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
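
          /* Not the owner: attempt the lock.  lll_robust_trylock
             returns zero on success and otherwise the lock value it
             found, so FUTEX_OWNER_DIED can be inspected below.  */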
          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;
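
      /* Priority-inheritance mutexes.  An uncontended acquisition is a
         single CAS in user space; contention and owner-died recovery
         are delegated to the kernel via FUTEX_TRYLOCK_PI.  */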
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
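
        /* Not the owner: try to go from "unlocked" (0) to "locked by
           us" (our TID) in one atomic step.  */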
        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }
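
        /* The lock is now held by this thread; FUTEX_OWNER_DIED in the
           old value means it was inherited from a thread that died
           holding it.  */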
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
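
      /* Priority-protection (priority ceiling) mutexes.  The ceiling
         is stored in the lock word; the thread's priority is boosted
         to the ceiling via the TPP machinery before the lock is
         taken.  */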
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }
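
        /* Boost this thread to the current ceiling, then try to flip
           the lock bit; restart if the ceiling changed in between.  */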
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
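
/* Usage sketch (not part of the original file): callers distinguish 0
   (acquired), EBUSY (held by someone else) and, for robust mutexes,
   EOWNERDEAD (acquired, but the previous owner died and the protected
   state may need repair):

     pthread_mutex_t m;   // assume initialized with a robust attribute
     int r = pthread_mutex_trylock (&m);
     if (r == 0)
       pthread_mutex_unlock (&m);
     else if (r == EOWNERDEAD)
       {
         // repair the protected state here, then mark it consistent
         pthread_mutex_consistent_np (&m);
         pthread_mutex_unlock (&m);
       }
     else if (r != EBUSY)
       abort ();   // EDEADLK, EAGAIN, EINVAL, ENOTRECOVERABLE, ...
*/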