[BZ #4514]
[glibc.git] / nptl / pthread_mutex_lock.c
blob 1c3ee4fe25555d7815a3cd4aa4d8c72a8191ddcd
/* Copyright (C) 2002,2003,2004,2005,2006,2007 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
# define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
#endif
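
/* The LLL_MUTEX_* wrappers above default to the plain low-level lock
   operations.  A file can redefine them before including this one to
   reuse the same locking logic with a different low-level lock; for
   instance pthread_mutex_cond_lock.c does so (and defines NO_INCR,
   which suppresses the __nusers increment when a mutex is reacquired
   after a condition wait).  */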

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      assert (mutex->__data.__owner == 0);
      break;
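
      /* Adaptive mutex: spin briefly in user space in the hope that
         the owner releases the lock soon, and only then fall back to
         the (sleeping) low-level lock.  __spins keeps a moving average
         of how much spinning past acquisitions needed.  */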
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex->__data.__lock);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
      break;
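
      /* Robust mutex: the lock word holds the owner's TID, and the
         kernel sets FUTEX_OWNER_DIED in it if the owner exits without
         unlocking.  list_op_pending is set before the mutex is touched
         so the kernel can complete or undo the ownership-list update
         if this thread itself dies at the wrong moment.  */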
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }
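
          /* Not owned by us and the previous owner did not die: block
             on the robust variant of the low-level lock, which stores
             our TID in the lock word once we own it.  */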
          oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
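
      /* Priority-inheritance mutex: only the uncontended fast path is
         handled in user space.  On contention the kernel takes over
         (FUTEX_LOCK_PI) and boosts the owner's priority while
         higher-priority threads are blocked on the lock.  */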
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
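
      /* Priority-protection mutex: the priority ceiling lives in the
         upper bits of the lock word and the lock state in the low bits
         (ceilval = unlocked, ceilval | 1 = locked, ceilval | 2 = locked
         with waiters).  The thread raises its own priority to the
         ceiling before it can hold the lock.  */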
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;
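
            /* The lock is held by someone else.  Mark it as contended
               (ceilval | 2), wait on the futex until woken, and then
               try again to take the lock.  */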
            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2);
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
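
/* When this file is included from pthread_mutex_cond_lock.c the name
   __pthread_mutex_lock is redefined, so the guard below keeps these
   public aliases from being defined a second time.  */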
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif