/* Copyright (C) 2002-2007, 2008 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <not-cancel.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef LLL_MUTEX_LOCK
# define LLL_MUTEX_LOCK(mutex) \
  lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex))
# define LLL_MUTEX_TRYLOCK(mutex) \
  lll_trylock ((mutex)->__data.__lock)
# define LLL_ROBUST_MUTEX_LOCK(mutex, id) \
  lll_robust_lock ((mutex)->__data.__lock, id, \
                   PTHREAD_ROBUST_MUTEX_PSHARED (mutex))
#endif
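
/* Note: these are only defaults.  Another file may define its own
   LLL_MUTEX_LOCK and friends (and possibly NO_INCR) before including
   this one, so that the same source also implements variant lock
   entry points; pthread_mutex_cond_lock.c is believed to do this for
   the internal condvar lock.  The exact set of overriding includers
   is an assumption here, not verified against the build.  */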

static int __pthread_mutex_lock_full (pthread_mutex_t *mutex)
     __attribute_noinline__;

int
__pthread_mutex_lock (mutex)
     pthread_mutex_t *mutex;
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE (mutex);
  if (__builtin_expect (type & ~PTHREAD_MUTEX_KIND_MASK_NP, 0))
    return __pthread_mutex_lock_full (mutex);
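  /* Only the four basic mutex kinds are handled inline below; any
     mutex using robustness, priority inheritance, or priority
     protection carries extra bits outside PTHREAD_MUTEX_KIND_MASK_NP
     and is diverted to the noinline slow path above, which keeps the
     common case small.  */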

  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
  else if (__builtin_expect (type == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */

      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
    }
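  /* Illustrative sketch (not part of this file): from the caller's
     side the recursive case above means the same thread may reacquire
     the mutex, provided every lock is paired with an unlock.  This
     assumes a mutex initialized with the portable
     PTHREAD_MUTEX_RECURSIVE type:

         pthread_mutex_t m;
         pthread_mutexattr_t a;
         pthread_mutexattr_init (&a);
         pthread_mutexattr_settype (&a, PTHREAD_MUTEX_RECURSIVE);
         pthread_mutex_init (&m, &a);

         pthread_mutex_lock (&m);
         pthread_mutex_lock (&m);   // __count bumped to 2, no blocking
         pthread_mutex_unlock (&m);
         pthread_mutex_unlock (&m); // now actually released
  */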
  else if (__builtin_expect (type == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    {
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
    }
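  /* The __spins update above is an exponential moving average: each
     contended acquisition moves the stored estimate 1/8 of the way
     toward the number of iterations just spent spinning.  For
     example, with __spins == 0 and cnt == 40 the new value is
     0 + (40 - 0) / 8 = 5, so the next attempt spins up to
     MIN (MAX_ADAPTIVE_COUNT, 5 * 2 + 10) = 20 times (assuming
     MAX_ADAPTIVE_COUNT is larger) before falling back to the futex.  */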
  else
    {
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;
      goto simple;
    }

 out:
  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}

static int
__pthread_mutex_lock_full (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }
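          /* Illustrative sketch (not part of this file): a caller
             that receives EOWNERDEAD still owns the lock and is
             expected to repair the protected data, then mark the
             mutex usable again:

                 int e = pthread_mutex_lock (&m);
                 if (e == EOWNERDEAD)
                   {
                     repair_shared_state ();  // hypothetical helper
                     pthread_mutex_consistent_np (&m);
                   }

             If no thread ever marks the mutex consistent, later
             lockers get ENOTRECOVERABLE, as handled further below.  */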

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
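
      /* Illustrative sketch (not part of this file): a PI mutex is
         requested through the standard attribute API; the snippet
         below is assumed typical usage, not glibc test code:

             pthread_mutexattr_t a;
             pthread_mutexattr_init (&a);
             pthread_mutexattr_setprotocol (&a, PTHREAD_PRIO_INHERIT);
             pthread_mutex_init (&m, &a);

         With FUTEX_LOCK_PI the kernel queues waiters and boosts the
         owner's priority, which is why the contended path above is a
         single syscall rather than a user-space wait loop.  */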

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
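        /* For priority-protected mutexes the lock word is split: the
           bits under PTHREAD_MUTEX_PRIO_CEILING_MASK hold the current
           priority ceiling, while the low bits encode the lock state
           as used below: ceilval means unlocked, ceilval | 1 locked
           with no waiters, and ceilval | 2 locked with possible
           waiters.  */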

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2,
                                  PTHREAD_MUTEX_PSHARED (mutex));
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return 0;
}

#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
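
/* Illustrative sketch (not part of this file): the error-checking
   path above is observable from user code.  The program below is a
   minimal, self-contained example and is assumed typical usage, not
   glibc test code:

       #include <errno.h>
       #include <pthread.h>
       #include <stdio.h>

       int
       main (void)
       {
         pthread_mutex_t m;
         pthread_mutexattr_t a;

         pthread_mutexattr_init (&a);
         pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
         pthread_mutex_init (&m, &a);

         pthread_mutex_lock (&m);
         if (pthread_mutex_lock (&m) == EDEADLK)  // relock by same thread
           puts ("self-deadlock detected");

         pthread_mutex_unlock (&m);
         return 0;
       }
*/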