/* Copyright (C) 2002-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

#include <kernel-features.h>
/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */
        .globl  __pthread_cond_wait
        .type   __pthread_cond_wait, @function
#ifdef SHARED
        cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
                        DW.ref.__gcc_personality_v0)
        cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
        cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
        cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif
#define FRAME_SIZE (32+8)
        leaq    -FRAME_SIZE(%rsp), %rsp
        cfi_adjust_cfa_offset(FRAME_SIZE)
        /* Stack frame:

                    +--------------------------+
           rsp + 24 | old wakeup_seq value     |
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */
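
        /* SystemTap probe: pthread_cond_wait entered; %rdi is the condvar,
           %rsi the mutex.  */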
        LIBC_PROBE (cond_wait, 2, %rdi, %rsi)
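
        /* Check whether the condvar is process-shared (dep_mutex == -1).  */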
        LP_OP(cmp) $-1, dep_mutex(%rdi)

        /* Prepare structure passed to cancellation handler.  */
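
        /* For a process-private condvar remember the mutex it is used
           with.  */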
        mov     %RSI_LP, dep_mutex(%rdi)
        /* Get internal lock.  */
        cmpxchgl %esi, cond_lock(%rdi)
        /* Unlock the mutex.  */
2:      movq    16(%rsp), %rdi
        callq   __pthread_mutex_unlock_usercnt
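
        /* Register ourselves as a waiter: bump the futex value and the
           waiter count in cond_nwaiters.  */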
        incl    cond_futex(%rdi)
        addl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
        /* Get and store current wakeup_seq value.  */
        movq    wakeup_seq(%rdi), %r9
        movl    broadcast_seq(%rdi), %edx
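
        /* Load the futex value the kernel will check against while we
           sleep.  */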
8:      movl    cond_futex(%rdi), %edx
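
        /* Enable asynchronous cancellation around the blocking futex
           call.  */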
4:      callq   __pthread_enable_asynccancel
        LP_OP(cmp) $-1, dep_mutex(%rdi)
        leaq    cond_futex(%rdi), %rdi
        movl    $FUTEX_WAIT, %esi

        mov     dep_mutex-cond_futex(%rdi), %R8_LP
        /* Requeue to a non-robust PI mutex if the PI bit is set and
           the robust bit is not set.  */
        movl    MUTEX_KIND(%r8), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax
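
        /* Wait with FUTEX_WAIT_REQUEUE_PI so the kernel can hand us the
           PI mutex directly on wakeup.  */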
        movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
        movl    $SYS_futex, %eax
#ifdef __ASSUME_REQUEUE_PI
        /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
           successfully, it has already locked the mutex for us and the
           pi_flag (%r8b) is set to denote that fact.  However, if another
           thread changed the futex value before we entered the wait, the
           syscall may return EAGAIN and the mutex is not locked.  We go
           ahead with a success anyway since later we look at the pi_flag to
           decide if we got the mutex or not.  The sequence numbers then make
           sure that only one of the threads actually wakes up.  We retry
           using normal FUTEX_WAIT only if the kernel returned ENOSYS, since
           normal and PI futexes don't mix.

           Note that we don't check for EAGAIN specifically; we assume that
           the only other error the futex syscall could return is EAGAIN,
           since anything else would mean an error in our function.  It is
           too expensive to do that check for every call (and such calls are
           quite common with a large number of threads), so it has been
           skipped.  */
# ifndef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAIT, %esi
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
        orl     %fs:PRIVATE_FUTEX, %esi

        movl    $SYS_futex, %eax
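
        /* The wait has returned; restore the previous cancellation state
           saved at rsp + 0.  */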
62:     movl    (%rsp), %edi
        callq   __pthread_disable_asynccancel
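
        /* Re-acquire the condvar's internal lock.  */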
        cmpxchgl %esi, (%rdi)
        cmpxchgl %esi, cond_lock(%rdi)
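
        /* Check whether we have actually been woken: compare the current
           broadcast_seq and the woken/wakeup sequence counters with the
           values saved before going to sleep.  */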
6:      movl    broadcast_seq(%rdi), %edx
        movq    woken_seq(%rdi), %rax
        movq    wakeup_seq(%rdi), %r9
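
        /* Done waiting: remove ourselves from the waiter count.  */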
16:     subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
        movl    cond_nwaiters(%rdi), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax

        addq    $cond_nwaiters, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        movl    %fs:PRIVATE_FUTEX, %esi
        orl     $FUTEX_WAKE, %esi
        movl    $SYS_futex, %eax
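
        /* Restore the condvar pointer.  */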
        subq    $cond_nwaiters, %rdi
        /* If requeue_pi is used the kernel performs the locking of the
           mutex.  */
11:     movq    16(%rsp), %rdi
        callq   __pthread_mutex_cond_lock
14:     leaq    FRAME_SIZE(%rsp), %rsp
        cfi_adjust_cfa_offset(-FRAME_SIZE)

        /* We return the result of the mutex_lock operation.  */

        cfi_adjust_cfa_offset(FRAME_SIZE)
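
        /* With requeue-PI the kernel already acquired the mutex for us;
           only its internal bookkeeping needs adjusting.  */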
18:     callq   __pthread_mutex_cond_lock_adjust
        /* We need to go back to futex_wait.  If we're using requeue_pi, then
           release the mutex we had acquired and go back.  */

        /* Adjust the mutex values first and then unlock it.  The unlock
           should always succeed or else the kernel did not lock the mutex
           successfully.  */
        callq   __pthread_mutex_cond_lock_adjust

        callq   __pthread_mutex_unlock_usercnt

        /* Reload the condvar pointer.  */
        /* Initial locking failed.  */
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        callq   __lll_lock_wait
        /* Unlock in loop requires wakeup.  */
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        /* The call preserves %rdx.  */
        callq   __lll_unlock_wake
        subq    $cond_lock, %rdi
        /* Locking in loop failed.  */
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        callq   __lll_lock_wait
        subq    $cond_lock, %rdi
        /* Unlock after loop requires wakeup.  */
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        callq   __lll_unlock_wake
        /* The initial unlocking of the mutex failed.  */
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        callq   __lll_unlock_wake
        .size   __pthread_cond_wait, .-__pthread_cond_wait
versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
                  GLIBC_2_3_2)
        .type   __condvar_cleanup1, @function
        .globl  __condvar_cleanup1
        .hidden __condvar_cleanup1
        /* Stack frame:

                    +--------------------------+
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */
        /* Get internal lock.  */
        cmpxchgl %esi, (%rdi)
        cmpxchgl %esi, cond_lock(%rdi)

        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        callq   __lll_lock_wait
        subq    $cond_lock, %rdi
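
        /* If a broadcast arrived since we saved broadcast_seq, the sequence
           counters need no adjustment.  */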
1:      movl    broadcast_seq(%rdi), %edx
        /* We increment the wakeup_seq counter only if it is lower than
           total_seq.  If this is not the case the thread was woken and
           then canceled.  In this case we ignore the signal.  */
        movq    total_seq(%rdi), %rax
        cmpq    wakeup_seq(%rdi), %rax
        incq    wakeup_seq(%rdi)
        incl    cond_futex(%rdi)
6:      incq    woken_seq(%rdi)
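
        /* Remove ourselves from the waiter count.  */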
3:      subl    $(1 << nwaiters_shift), cond_nwaiters(%rdi)
        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq    $0xffffffffffffffff, total_seq(%rdi)
        movl    cond_nwaiters(%rdi), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax

        LP_OP(cmp) $-1, dep_mutex(%rdi)
        leaq    cond_nwaiters(%rdi), %rdi
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        movl    %fs:PRIVATE_FUTEX, %esi
        orl     $FUTEX_WAKE, %esi
        movl    $SYS_futex, %eax
        subq    $cond_nwaiters, %rdi
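
        /* Release the condvar's internal lock.  */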
        addq    $cond_lock, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl    $LLL_PRIVATE, %eax
        movl    $LLL_SHARED, %esi
        /* The call preserves %rcx.  */
        callq   __lll_unlock_wake
        /* Wake up all waiters to make sure no signal gets lost.  */
        addq    $cond_futex, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
        movl    $0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl    $FUTEX_WAKE, %eax
        movl    $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        movl    %fs:PRIVATE_FUTEX, %esi
        orl     $FUTEX_WAKE, %esi
        movl    $SYS_futex, %eax
        /* Lock the mutex only if we don't own it already.  This only happens
           in case of PI mutexes, if we got cancelled after a successful
           return of the futex syscall and before disabling async
           cancellation.  */
5:      movq    16(%rsp), %rdi
        movl    MUTEX_KIND(%rdi), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax

        /* We managed to get the lock.  Fix it up before returning.  */
        callq   __pthread_mutex_cond_lock_adjust

7:      callq   __pthread_mutex_cond_lock
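
        /* Reload the exception object and resume unwinding.  */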
8:      movq    24(%rsp), %rdi
        call    _Unwind_Resume@PLT
        .size   __condvar_cleanup1, .-__condvar_cleanup1
        .section .gcc_except_table,"a",@progbits
        .byte   DW_EH_PE_omit                   # @LPStart format
        .byte   DW_EH_PE_omit                   # @TType format
        .byte   DW_EH_PE_uleb128                # call-site format
        .uleb128 .Lcstend-.Lcstbegin
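        # Call-site entry: the region from .LcleanupSTART to .LcleanupEND
        # unwinds through __condvar_cleanup1.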
        .uleb128 .LcleanupSTART-.LSTARTCODE
        .uleb128 .LcleanupEND-.LcleanupSTART
        .uleb128 __condvar_cleanup1-.LSTARTCODE
        .uleb128 .LcallUR-.LSTARTCODE
        .uleb128 .LENDCODE-.LcallUR
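
        /* Out-of-line reference to the personality routine, used by the
           PC-relative indirect cfi_personality encoding above.  */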
        .hidden DW.ref.__gcc_personality_v0
        .weak   DW.ref.__gcc_personality_v0
        .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .type   DW.ref.__gcc_personality_v0, @object
        .size   DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
        ASM_ADDR __gcc_personality_v0