1 /* $NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */
4 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Emmanuel Dreyfus
17 * 4. The name of the author may not be used to endorse or promote
18 * products derived from this software without specific prior written
21 * THIS SOFTWARE IS PROVIDED BY THE THE AUTHOR AND CONTRIBUTORS ``AS IS''
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
34 #include "opt_compat.h"
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/imgact.h>
39 #include <sys/imgact_aout.h>
40 #include <sys/imgact_elf.h>
41 #include <sys/kern_syscall.h>
43 #include <sys/malloc.h>
45 #include <sys/signalvar.h>
46 #include <sys/sysent.h>
47 #include <sys/sysproto.h>
50 #include <sys/spinlock2.h>
53 #include <vm/vm_param.h>
54 #include <vm/vm_page.h>
55 #include <vm/vm_extern.h>
57 #include <sys/kernel.h>
58 #include <sys/module.h>
59 #include <machine/cpu.h>
60 #include <machine/limits.h>
62 #include "i386/linux.h"
63 #include "i386/linux_proto.h"
64 #include "linux_signal.h"
65 #include "linux_util.h"
66 #include "linux_emuldata.h"
/*
 * Kernel malloc zones for futex bookkeeping: M_FUTEX backs struct futex
 * allocations, M_FUTEX_WP backs struct waiting_proc allocations.
 * NOTE(review): this file is a garbled extraction — original lines are split
 * across several text lines and some interior lines are missing; reconcile
 * against a pristine linux_futex.c before making code changes.
 */
68 MALLOC_DEFINE(M_FUTEX
, "futex", "Linux futexes");
69 MALLOC_DEFINE(M_FUTEX_WP
, "futex wp", "Linux futexes wp");
/*
 * Data-structure fragments. The enclosing "struct waiting_proc {" and
 * "struct futex {" headers were lost in extraction — presumably:
 *   - wp_futex / wp_list are members of struct waiting_proc: the futex the
 *     waiter sleeps on, and its linkage on that futex's f_waiting_proc queue;
 *   - f_list / f_waiting_proc are members of struct futex: linkage on the
 *     global futex_list, and the queue of waiters on this futex;
 *   - futex_list is the global list head of all active futexes.
 * TODO(review): confirm against the original file; wp_flags / f_uaddr /
 * f_refcount / f_lck members referenced later are not visible here.
 */
75 struct futex
*wp_futex
;
76 TAILQ_ENTRY(waiting_proc
) wp_list
;
83 LIST_ENTRY(futex
) f_list
;
84 TAILQ_HEAD(lf_waiting_proc
, waiting_proc
) f_waiting_proc
;
87 struct futex_list futex_list
;
/*
 * Per-futex locking primitives.  Two alternative definitions are visible
 * (spinlock-based vs lockmgr-based); the #if/#else/#endif lines selecting
 * between them were lost in extraction — presumably the lockmgr variant is
 * the live one, since FUTEX_ASSERT_LOCKED/FUTEX_DESTROY exist only there.
 * futex_mtx + FUTEXES_LOCK/UNLOCK guard the global futex_list.
 * LINUX_CTR* are debug-trace wrappers around kprintf; the "while (0)"
 * prefix variant compiles them out (the selecting #if lines are missing).
 */
90 #define FUTEX_LOCK(f) spin_lock_wr(&(f)->f_lck)
91 #define FUTEX_UNLOCK(f) spin_unlock_wr(&(f)->f_lck)
92 #define FUTEX_INIT(f) spin_init(&(f)->f_lck)
93 #define FUTEX_SLEEP(f, id, flag, wm, timo) ssleep((id), &(f)->f_lck, (flag), (wm), (timo))
96 #define FUTEX_LOCK(f) lockmgr(&(f)->f_lck, LK_EXCLUSIVE)
97 #define FUTEX_UNLOCK(f) lockmgr(&(f)->f_lck, LK_RELEASE)
98 #define FUTEX_INIT(f) lockinit(&(f)->f_lck, "ftlk", 0, LK_CANRECURSE)
99 #define FUTEX_DESTROY(f) lockuninit(&(f)->f_lck)
100 #define FUTEX_ASSERT_LOCKED(f) KKASSERT(lockstatus(&(f)->f_lck, curthread) == LK_EXCLUSIVE)
101 #define FUTEX_SLEEP(f, id, flag, wm, timo) lksleep((id), &(f)->f_lck, (flag), (wm), (timo))
103 struct lock futex_mtx
; /* protects the futex list */
104 #define FUTEXES_LOCK lockmgr(&futex_mtx, LK_EXCLUSIVE)
105 #define FUTEXES_UNLOCK lockmgr(&futex_mtx, LK_RELEASE)
107 /* Debug magic to take advantage of freebsd's mess */
109 #define LINUX_CTR_PREFIX
111 #define LINUX_CTR_PREFIX while (0)
114 #define LINUX_CTR1(a,b,c) LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c)
115 #define LINUX_CTR2(a,b,c,d) LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d)
116 #define LINUX_CTR3(a,b,c,d,e) LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e)
117 #define LINUX_CTR4(a,b,c,d,e,f) LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e,f)
118 #define LINUX_CTR5(a,b,c,d,e,f,g) LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e,f,g)
/* Flags for futex_get()/futex_get0() and waiting_proc wp_flags below. */
122 /* flags for futex_get() */
123 #define FUTEX_CREATE_WP 0x1 /* create waiting_proc */
124 #define FUTEX_DONTCREATE 0x2 /* don't create futex if not exists */
125 #define FUTEX_DONTEXISTS 0x4 /* return EINVAL if futex exists */
128 #define FUTEX_WP_REQUEUED 0x1 /* wp requeued - wp moved from wp_list
129 * of futex where thread sleep to wp_list
132 #define FUTEX_WP_REMOVED 0x2 /* wp is woken up and removed from futex
133 * wp_list to prevent double wakeup.
/*
 * Prototypes for the machine-dependent atomic user-memory operations used by
 * FUTEX_WAKE_OP (implemented elsewhere, likely in assembly): each atomically
 * applies its operation with oparg to *uaddr, returns the previous value via
 * *oldval, and returns 0 on success (nonzero presumably meaning fault —
 * TODO confirm the error convention against the implementation).
 */
137 int futex_xchgl(int oparg
, uint32_t *uaddr
, int *oldval
);
138 int futex_addl(int oparg
, uint32_t *uaddr
, int *oldval
);
139 int futex_orl(int oparg
, uint32_t *uaddr
, int *oldval
);
140 int futex_andl(int oparg
, uint32_t *uaddr
, int *oldval
);
141 int futex_xorl(int oparg
, uint32_t *uaddr
, int *oldval
);
/*
 * futex_put: release a waiter and drop one reference on futex f.
 * Caller must hold f's lock (FUTEX_ASSERT_LOCKED).  If wp was not already
 * detached by a waker (FUTEX_WP_REMOVED), it is unlinked from f's waiter
 * queue; the waiting_proc is then freed.  When the refcount drops to zero
 * the futex is removed from the global list (the unlock/destroy/free lines
 * are among those missing from this extraction — original lines 151-158,
 * 161-165; verify against the pristine file).
 */
144 futex_put(struct futex
*f
, struct waiting_proc
*wp
)
146 FUTEX_ASSERT_LOCKED(f
);
148 if ((wp
->wp_flags
& FUTEX_WP_REMOVED
) == 0)
149 TAILQ_REMOVE(&f
->f_waiting_proc
, wp
, wp_list
);
150 kfree(wp
, M_FUTEX_WP
);
154 if (--f
->f_refcount
== 0) {
155 LIST_REMOVE(f
, f_list
);
159 LINUX_CTR2(sys_futex
, "futex_put destroy uaddr %p ref %d",
160 f
->f_uaddr
, f
->f_refcount
);
166 LINUX_CTR2(sys_futex
, "futex_put uaddr %p ref %d",
167 f
->f_uaddr
, f
->f_refcount
);
/*
 * futex_get0: look up (and reference) the futex keyed by user address uaddr,
 * or create one, returning it via *newf.
 * Flags: FUTEX_DONTCREATE = return without creating if absent;
 *        FUTEX_DONTEXISTS = fail (EINVAL per the flag's comment) if present.
 * Visible logic: scan the global futex_list for a matching f_uaddr; on a hit
 * free the speculatively allocated tmpf and take a reference on the existing
 * futex; on a miss (unless DONTCREATE) initialize tmpf (refcount 1, empty
 * waiter queue), lock it before insertion so others can't use it early, and
 * insert it at the head of futex_list.
 * NOTE(review): the FUTEXES_LOCK/UNLOCK, FUTEX_INIT/FUTEX_LOCK calls, error
 * returns, and the retry path implied by the "before FUTEX_LOCK()" comment
 * are among the lines lost in extraction — do not edit without the original.
 */
173 futex_get0(uint32_t *uaddr
, struct futex
**newf
, uint32_t flags
)
175 struct futex
*f
, *tmpf
;
181 LIST_FOREACH(f
, &futex_list
, f_list
) {
182 if (f
->f_uaddr
== uaddr
) {
186 kfree(tmpf
, M_FUTEX
);
188 if (flags
& FUTEX_DONTEXISTS
) {
194 * Increment refcount of the found futex to
195 * prevent it from deallocation before FUTEX_LOCK()
202 LINUX_CTR2(sys_futex
, "futex_get uaddr %p ref %d",
203 uaddr
, f
->f_refcount
);
208 if (flags
& FUTEX_DONTCREATE
) {
210 LINUX_CTR1(sys_futex
, "futex_get uaddr %p null", uaddr
);
216 tmpf
= kmalloc(sizeof(*tmpf
), M_FUTEX
, M_WAITOK
| M_ZERO
);
217 tmpf
->f_uaddr
= uaddr
;
218 tmpf
->f_refcount
= 1;
220 TAILQ_INIT(&tmpf
->f_waiting_proc
);
223 * Lock the new futex before an insert into the futex_list
224 * to prevent futex usage by other.
230 LIST_INSERT_HEAD(&futex_list
, tmpf
, f_list
);
233 LINUX_CTR2(sys_futex
, "futex_get uaddr %p ref %d new",
234 uaddr
, tmpf
->f_refcount
);
/*
 * futex_get: wrapper around futex_get0 that optionally (FUTEX_CREATE_WP)
 * allocates a waiting_proc, and on success links it at the head of the
 * futex's waiter queue and points it back at the futex.
 * On futex_get0 failure the speculatively allocated *wp is freed.
 * NOTE(review): the trailing "flags" parameter, error-return lines, and
 * closing braces are missing from this extraction (original signature
 * continues past line 240).
 */
240 futex_get(uint32_t *uaddr
, struct waiting_proc
**wp
, struct futex
**f
,
245 if (flags
& FUTEX_CREATE_WP
) {
246 *wp
= kmalloc(sizeof(struct waiting_proc
), M_FUTEX_WP
, M_WAITOK
);
249 error
= futex_get0(uaddr
, f
, flags
);
251 if (flags
& FUTEX_CREATE_WP
) {
252 kfree(*wp
, M_FUTEX_WP
);
257 if (flags
& FUTEX_CREATE_WP
) {
258 TAILQ_INSERT_HEAD(&(*f
)->f_waiting_proc
, *wp
, wp_list
);
259 (*wp
)->wp_futex
= *f
;
/*
 * futex_sleep: block the current thread on waiting_proc wp of locked futex f
 * for up to `timeout` ticks, interruptibly (PCATCH), dropping f's lock while
 * asleep.  Three sleep variants are visible (FUTEX_SLEEP macro, ssleep,
 * sx_sleep); the #ifdef lines choosing between them were lost in extraction —
 * presumably the FUTEX_SLEEP/lksleep form is the DragonFly one.
 * After waking, if a waker requeued us (FUTEX_WP_REQUEUED) wp->wp_futex now
 * names a different futex than f (asserted), and the missing tail of this
 * function presumably re-locks/releases the right futexes — verify against
 * the original (lines 284+ absent).
 */
266 futex_sleep(struct futex
*f
, struct waiting_proc
*wp
, unsigned long timeout
)
270 FUTEX_ASSERT_LOCKED(f
);
271 LINUX_CTR4(sys_futex
, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
272 f
->f_uaddr
, wp
, timeout
, f
->f_refcount
);
273 error
= FUTEX_SLEEP(f
, wp
, PCATCH
, "futex", timeout
);
275 error
= ssleep(wp
, &f
->f_lck
, PCATCH
, "futex", timeout
);
276 error
= sx_sleep(wp
, &f
->f_lck
, PCATCH
, "futex", timeout
);
278 if (wp
->wp_flags
& FUTEX_WP_REQUEUED
) {
279 KASSERT(f
!= wp
->wp_futex
, ("futex != wp_futex"));
280 LINUX_CTR5(sys_futex
, "futex_sleep out error %d uaddr %p w"
281 " %p requeued uaddr %p ref %d",
282 error
, f
->f_uaddr
, wp
, wp
->wp_futex
->f_uaddr
,
283 wp
->wp_futex
->f_refcount
);
/*
 * futex_wake: wake up to n threads sleeping on locked futex f.
 * Walks the waiter queue with the deletion-safe TAILQ_FOREACH_MUTABLE,
 * marks each waiter FUTEX_WP_REMOVED (so futex_put won't unlink it twice)
 * and detaches it from the queue.  The actual wakeup() call, the n-limit
 * check, and the woken-count return are among the missing lines (originals
 * 295, 297-298, 305+) — TODO confirm against the pristine file; the syscall
 * layer stores this function's return in sysmsg_iresult, so it presumably
 * returns the count woken.
 */
294 futex_wake(struct futex
*f
, int n
)
296 struct waiting_proc
*wp
, *wpt
;
299 FUTEX_ASSERT_LOCKED(f
);
300 TAILQ_FOREACH_MUTABLE(wp
, &f
->f_waiting_proc
, wp_list
, wpt
) {
301 LINUX_CTR3(sys_futex
, "futex_wake uaddr %p wp %p ref %d",
302 f
->f_uaddr
, wp
, f
->f_refcount
);
303 wp
->wp_flags
|= FUTEX_WP_REMOVED
;
304 TAILQ_REMOVE(&f
->f_waiting_proc
, wp
, wp_list
);
/*
 * futex_requeue: wake up to n waiters of futex f, then move up to n2 of the
 * remaining waiters onto futex f2's queue (both futexes must be locked).
 * Woken waiters are flagged FUTEX_WP_REMOVED and detached; requeued waiters
 * are flagged FUTEX_WP_REQUEUED, moved to f2's queue, and (per the trailing
 * comment) f2's refcount is bumped so it survives until the requeued thread
 * wakes and takes f2's lock.  The counters selecting wake-vs-requeue, the
 * refcount increment itself, and the return value are among the lines lost
 * in extraction (originals 315, 317-318, 321, 323, 325, 328-329, 336-337,
 * 341+) — verify against the pristine file before editing.
 */
314 futex_requeue(struct futex
*f
, int n
, struct futex
*f2
, int n2
)
316 struct waiting_proc
*wp
, *wpt
;
319 FUTEX_ASSERT_LOCKED(f
);
320 FUTEX_ASSERT_LOCKED(f2
);
322 TAILQ_FOREACH_MUTABLE(wp
, &f
->f_waiting_proc
, wp_list
, wpt
) {
324 LINUX_CTR2(sys_futex
, "futex_req_wake uaddr %p wp %p",
326 wp
->wp_flags
|= FUTEX_WP_REMOVED
;
327 TAILQ_REMOVE(&f
->f_waiting_proc
, wp
, wp_list
);
330 LINUX_CTR3(sys_futex
, "futex_requeue uaddr %p wp %p to %p",
331 f
->f_uaddr
, wp
, f2
->f_uaddr
);
332 wp
->wp_flags
|= FUTEX_WP_REQUEUED
;
333 /* Move wp to wp_list of f2 futex */
334 TAILQ_REMOVE(&f
->f_waiting_proc
, wp
, wp_list
);
335 TAILQ_INSERT_HEAD(&f2
->f_waiting_proc
, wp
, wp_list
);
338 * Thread which sleeps on wp after waking should
339 * acquire f2 lock, so increment refcount of f2 to
340 * prevent it from premature deallocation.
/*
 * futex_wait: FUTEX_WAIT back end — copy in the optional user timeout ts,
 * convert it to ticks via tvtohz_high(), sleep on (f, wp) and map a sleep
 * timeout (EWOULDBLOCK) to Linux's expected ETIMEDOUT (the mapping line
 * itself is among the missing lines after original 387).
 * A zero ts means wait forever (the {0,0} check visibly special-cases it);
 * a nonzero ts that rounds to 0 ticks is clamped to a minimal 1/hz timeout
 * per the inline comment (the clamp assignment is missing from extraction).
 * NOTE(review): `tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec /
 * 1000;` stuffs whole seconds into tv_usec — this looks suspect unless
 * tvtohz_high() normalizes overlarge tv_usec; confirm against the original
 * and tvtohz_high()'s contract before "fixing".
 */
355 futex_wait(struct futex
*f
, struct waiting_proc
*wp
, struct l_timespec
*ts
)
357 struct l_timespec timeout
= {0, 0};
358 struct timeval tv
= {0, 0};
363 error
= copyin(ts
, &timeout
, sizeof(timeout
));
368 tv
.tv_usec
= timeout
.tv_sec
* 1000000 + timeout
.tv_nsec
/ 1000;
369 timeout_hz
= tvtohz_high(&tv
);
371 if (timeout
.tv_sec
== 0 && timeout
.tv_nsec
== 0)
375 * If the user process requests a non null timeout,
376 * make sure we do not turn it into an infinite
377 * timeout because timeout_hz gets null.
379 * We use a minimal timeout of 1/hz. Maybe it would
380 * make sense to just return ETIMEDOUT without sleeping.
382 if (((timeout
.tv_sec
!= 0) || (timeout
.tv_nsec
!= 0)) &&
386 error
= futex_sleep(f
, wp
, timeout_hz
);
387 if (error
== EWOULDBLOCK
)
/*
 * futex_atomic_op: FUTEX_WAKE_OP helper.  Decodes Linux's packed encoded_op:
 * op = bits 31-28 (top 3 used), cmp = bits 27-24, oparg = bits 23-12, cmparg
 * = bits 11-0 (the left-then-right shifts sign-extend the 12-bit fields).
 * FUTEX_OP_OPARG_SHIFT turns oparg into a shift count (1 << oparg); the
 * assignment is among the missing lines.  Dispatches op to the atomic
 * futex_{xchgl,addl,orl,andl,xorl} helpers on *uaddr (note SET/ADD/OR/ANDN/
 * XOR — ANDN passes ~oparg), then evaluates cmp against the fetched oldval
 * and returns the boolean comparison result.
 * NOTE(review): the enclosing switch statements, the FUTEX_OP_* case labels
 * for the first switch, the ret error check, and the default/-ENOSYS paths
 * are lost in extraction — do not modify without the pristine file.
 */
394 futex_atomic_op(struct proc
*p
, int encoded_op
, uint32_t *uaddr
)
396 int op
= (encoded_op
>> 28) & 7;
397 int cmp
= (encoded_op
>> 24) & 15;
398 int oparg
= (encoded_op
<< 8) >> 20;
399 int cmparg
= (encoded_op
<< 20) >> 20;
402 if (encoded_op
& (FUTEX_OP_OPARG_SHIFT
<< 28))
406 if (ldebug(sys_futex
))
407 kprintf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
408 "cmparg = %x, uaddr = %p\n",
409 op
, cmp
, oparg
, cmparg
, uaddr
);
411 /* XXX: linux verifies access here and returns EFAULT */
415 ret
= futex_xchgl(oparg
, uaddr
, &oldval
);
418 ret
= futex_addl(oparg
, uaddr
, &oldval
);
421 ret
= futex_orl(oparg
, uaddr
, &oldval
);
424 ret
= futex_andl(~oparg
, uaddr
, &oldval
);
427 ret
= futex_xorl(oparg
, uaddr
, &oldval
);
438 case FUTEX_OP_CMP_EQ
:
439 return (oldval
== cmparg
);
440 case FUTEX_OP_CMP_NE
:
441 return (oldval
!= cmparg
);
442 case FUTEX_OP_CMP_LT
:
443 return (oldval
< cmparg
);
444 case FUTEX_OP_CMP_GE
:
445 return (oldval
>= cmparg
);
446 case FUTEX_OP_CMP_LE
:
447 return (oldval
<= cmparg
);
448 case FUTEX_OP_CMP_GT
:
449 return (oldval
> cmparg
);
/*
 * sys_linux_sys_futex: the futex(2) system call entry point for the Linux
 * emulation layer.  Strips LINUX_FUTEX_PRIVATE_FLAG (all futexes are treated
 * as private here), then dispatches on args->op:
 *   WAIT        - create a waiter on uaddr's futex, copy in the current user
 *                 value, return EWOULDBLOCK if it no longer equals args->val,
 *                 otherwise sleep via futex_wait(f, wp, args->timeout);
 *   WAKE        - look up the futex without creating it (absent futex =>
 *                 0 waiters woken), else sysmsg_iresult = futex_wake count;
 *   CMP_REQUEUE - EINVAL if uaddr == uaddr2 (Linux allows it; we don't),
 *                 require uaddr2's futex to NOT already exist (deadlock/LOR
 *                 avoidance; glibc falls back to FUTEX_WAKE on error),
 *                 recheck *uaddr == val3, then futex_requeue(f, val, f2,
 *                 nrwake) where nrwake is smuggled in the timeout argument;
 *   WAKE_OP     - reference both futexes (f2 only when uaddr2 != uaddr),
 *                 run futex_atomic_op on uaddr2, wake val waiters on f, and
 *                 if the op result says so wake nrwake waiters on f2 (or f
 *                 when the addresses coincide); EFAULT not handled yet (XXX);
 *   LOCK_PI / UNLOCK_PI / TRYLOCK_PI / REQUEUE - not implemented (REQUEUE
 *                 deliberately: racy, unused by glibc >= 2.3.3);
 *   default     - log the unknown op.
 * Results are returned through args->sysmsg_iresult (DragonFly sysmsg ABI).
 * NOTE(review): this region is heavily garbled — the switch(args->op) header,
 * break/return statements, futex_put/FUTEX_UNLOCK cleanup calls, and many
 * error paths are missing from the extraction (see gaps in the embedded
 * original numbering, e.g. 471-472, 480-483, 499-500, 582-585, 619-645).
 * Reconstruct only from the pristine linux_futex.c.
 */
456 sys_linux_sys_futex(struct linux_sys_futex_args
*args
)
458 int op_ret
, val
, ret
, nrwake
;
459 struct waiting_proc
*wp
;
460 struct futex
*f
, *f2
;
464 * Our implementation provides only privates futexes. Most of the apps
465 * should use private futexes but don't claim so. Therefore we treat
466 * all futexes as private by clearing the FUTEX_PRIVATE_FLAG. It works
467 * in most cases (ie. when futexes are not shared on file descriptor
468 * or between different processes.).
470 args
->op
= (args
->op
& ~LINUX_FUTEX_PRIVATE_FLAG
);
473 case LINUX_FUTEX_WAIT
:
474 LINUX_CTR2(sys_futex
, "WAIT val %d uaddr %p",
475 args
->val
, args
->uaddr
);
477 if (ldebug(sys_futex
))
478 kprintf(ARGS(sys_futex
, "futex_wait val %d uaddr %p"),
479 args
->val
, args
->uaddr
);
481 error
= futex_get(args
->uaddr
, &wp
, &f
, FUTEX_CREATE_WP
);
484 error
= copyin(args
->uaddr
, &val
, sizeof(val
));
486 LINUX_CTR1(sys_futex
, "WAIT copyin failed %d",
491 if (val
!= args
->val
) {
492 LINUX_CTR3(sys_futex
, "WAIT uaddr %p val %d != uval %d",
493 args
->uaddr
, args
->val
, val
);
495 return (EWOULDBLOCK
);
498 error
= futex_wait(f
, wp
, args
->timeout
);
501 case LINUX_FUTEX_WAKE
:
503 LINUX_CTR2(sys_futex
, "WAKE val %d uaddr %p",
504 args
->val
, args
->uaddr
);
507 * XXX: Linux is able to cope with different addresses
508 * corresponding to the same mapped memory in the sleeping
509 * and waker process(es).
512 if (ldebug(sys_futex
))
513 kprintf(ARGS(sys_futex
, "futex_wake val %d uaddr %p"),
514 args
->val
, args
->uaddr
);
516 error
= futex_get(args
->uaddr
, NULL
, &f
, FUTEX_DONTCREATE
);
520 args
->sysmsg_iresult
= 0;
523 args
->sysmsg_iresult
= futex_wake(f
, args
->val
);
527 case LINUX_FUTEX_CMP_REQUEUE
:
529 LINUX_CTR5(sys_futex
, "CMP_REQUEUE uaddr %p "
530 "val %d val3 %d uaddr2 %p val2 %d",
531 args
->uaddr
, args
->val
, args
->val3
, args
->uaddr2
,
532 (int)(unsigned long)args
->timeout
);
535 if (ldebug(sys_futex
))
536 kprintf(ARGS(sys_futex
, "futex_cmp_requeue uaddr %p "
537 "val %d val3 %d uaddr2 %p val2 %d"),
538 args
->uaddr
, args
->val
, args
->val3
, args
->uaddr2
,
539 (int)(unsigned long)args
->timeout
);
542 * Linux allows this, we would not, it is an incorrect
543 * usage of declared ABI, so return EINVAL.
545 if (args
->uaddr
== args
->uaddr2
)
547 error
= futex_get0(args
->uaddr
, &f
, 0);
552 * To avoid deadlocks return EINVAL if second futex
553 * exists at this time. Otherwise create the new futex
554 * and ignore false positive LOR which thus happens.
556 * Glibc fall back to FUTEX_WAKE in case of any error
557 * returned by FUTEX_CMP_REQUEUE.
559 error
= futex_get0(args
->uaddr2
, &f2
, FUTEX_DONTEXISTS
);
564 error
= copyin(args
->uaddr
, &val
, sizeof(val
));
566 LINUX_CTR1(sys_futex
, "CMP_REQUEUE copyin failed %d",
572 if (val
!= args
->val3
) {
573 LINUX_CTR2(sys_futex
, "CMP_REQUEUE val %d != uval %d",
580 nrwake
= (int)(unsigned long)args
->timeout
;
581 args
->sysmsg_iresult
= futex_requeue(f
, args
->val
, f2
, nrwake
);
586 case LINUX_FUTEX_WAKE_OP
:
588 LINUX_CTR5(sys_futex
, "WAKE_OP "
589 "uaddr %p op %d val %x uaddr2 %p val3 %x",
590 args
->uaddr
, args
->op
, args
->val
,
591 args
->uaddr2
, args
->val3
);
594 if (ldebug(sys_futex
))
595 kprintf(ARGS(sys_futex
, "futex_wake_op "
596 "uaddr %p op %d val %x uaddr2 %p val3 %x"),
597 args
->uaddr
, args
->op
, args
->val
,
598 args
->uaddr2
, args
->val3
);
600 error
= futex_get0(args
->uaddr
, &f
, 0);
603 if (args
->uaddr
!= args
->uaddr2
)
604 error
= futex_get0(args
->uaddr2
, &f2
, 0);
611 * This function returns positive number as results and
614 op_ret
= futex_atomic_op(curproc
, args
->val3
, args
->uaddr2
);
617 /* XXX: We don't handle the EFAULT yet. */
618 if (op_ret
!= -EFAULT
) {
630 ret
= futex_wake(f
, args
->val
);
634 nrwake
= (int)(unsigned long)args
->timeout
;
637 op_ret
+= futex_wake(f2
, nrwake
);
639 op_ret
+= futex_wake(f
, nrwake
);
646 args
->sysmsg_iresult
= ret
;
649 case LINUX_FUTEX_LOCK_PI
:
650 /* not yet implemented */
653 case LINUX_FUTEX_UNLOCK_PI
:
654 /* not yet implemented */
657 case LINUX_FUTEX_TRYLOCK_PI
:
658 /* not yet implemented */
661 case LINUX_FUTEX_REQUEUE
:
664 * Glibc does not use this operation since version 2.3.3,
665 * as it is racy and replaced by FUTEX_CMP_REQUEUE operation.
666 * Glibc versions prior to 2.3.3 fall back to FUTEX_WAKE when
667 * FUTEX_REQUEUE returned EINVAL.
672 kprintf("linux_sys_futex: unknown op %d\n", args
->op
);
/*
 * sys_linux_set_robust_list: register the calling process's robust-futex
 * list head.  Rejects (presumably EINVAL — the return line is missing) a
 * len that does not match sizeof(struct linux_robust_list_head), then stores
 * the user pointer in the per-process emulation data via
 * emuldata_set_robust().  The head is consumed later by release_futexes()
 * at process exit.
 */
680 sys_linux_set_robust_list(struct linux_set_robust_list_args
*args
)
683 if (ldebug(set_robust_list
))
684 kprintf(ARGS(set_robust_list
, "head %p len %d"),
685 args
->head
, args
->len
);
688 if (args
->len
!= sizeof(struct linux_robust_list_head
))
691 emuldata_set_robust(curproc
, args
->head
);
/*
 * sys_linux_get_robust_list: return the robust-futex list head of the
 * calling process (pid == 0) or of another process looked up via pfind().
 * For the self case a NULL registered head is reported as a zeroed
 * linux_robust_list_head (empty_head).  For the foreign-pid case access is
 * gated by priv_check() for SETUID/SETEUID privileges (p_candebug check is
 * commented out as an open XXX).  On success, copies out the fixed length
 * followed by the head structure itself.
 * NOTE(review): error returns, the pfind()==NULL branch, PRELE/unlock of the
 * found proc, and the copyout-of-empty_head branch are among the lines lost
 * in extraction (gaps at originals 705-707, 717-718, 725-729, 735-737,
 * 739-742, 744-746) — consult the pristine file.
 */
699 sys_linux_get_robust_list(struct linux_get_robust_list_args
*args
)
701 struct linux_emuldata
*em
;
702 struct linux_robust_list_head empty_head
;
703 struct linux_robust_list_head
*head
;
704 l_size_t len
= sizeof(struct linux_robust_list_head
);
708 if (ldebug(get_robust_list
))
709 kprintf(ARGS(get_robust_list
, ""));
712 if (args
->pid
== 0) {
713 em
= emuldata_get(curproc
);
714 KKASSERT(em
!= NULL
);
715 if (em
->robust_futexes
== NULL
) {
716 bzero(&empty_head
, sizeof(empty_head
));
719 head
= em
->robust_futexes
;
724 p
= pfind(args
->pid
);
730 em
= emuldata_get(p
);
731 /* XXX: ptrace? p_candebug?*/
732 if (priv_check(curthread
, PRIV_CRED_SETUID
) ||
733 priv_check(curthread
, PRIV_CRED_SETEUID
)/* ||
734 p_candebug(curproc, p) */) {
738 head
= em
->robust_futexes
;
743 error
= copyout(&len
, args
->len
, sizeof(l_size_t
));
747 error
= copyout(head
, args
->head
, sizeof(struct linux_robust_list_head
));
/*
 * handle_futex_death: clean up one robust futex word at uaddr for dying
 * process p.  Reads the 32-bit user value; if the TID field matches p's pid,
 * atomically (casuword) replaces it with FUTEX_OWNER_DIED while preserving
 * FUTEX_WAITERS.  If it is not a PI futex and waiters exist, wakes one
 * waiter (the futex_get call is truncated mid-argument list; the wake and
 * the CAS-retry loop implied by nval are among the missing lines —
 * originals 754, 756-759, 761, 765-771, 774-786).
 * NOTE(review): casuword on (ulong *)uaddr is a word-size concern on 64-bit
 * — confirm the original's handling before changing.
 */
753 handle_futex_death(struct proc
*p
, uint32_t *uaddr
, int pi
)
755 uint32_t uval
, nval
, mval
;
760 if (copyin(uaddr
, &uval
, 4))
762 if ((uval
& FUTEX_TID_MASK
) == p
->p_pid
) {
763 mval
= (uval
& FUTEX_WAITERS
) | FUTEX_OWNER_DIED
;
764 nval
= casuword((ulong
*)uaddr
, uval
, mval
);
772 if (!pi
&& (uval
& FUTEX_WAITERS
)) {
773 error
= futex_get(uaddr
, NULL
, &f
,
/*
 * fetch_robust_entry: copy one user-space robust-list link (*head) into the
 * kernel.  The low bit of the fetched word is Linux's "PI futex" tag: the
 * pointer is stored in *entry with bit 0 masked off, and (per the missing
 * line after 796, presumably) bit 0 is reported through *pi.  Returns
 * nonzero (EFAULT-style) when copyin fails — the success-return line is
 * also missing from this extraction.
 */
788 fetch_robust_entry(struct linux_robust_list
**entry
,
789 struct linux_robust_list
**head
, int *pi
)
793 if (copyin((const void *)head
, &uentry
, sizeof(l_ulong
)))
796 *entry
= (void *)(uentry
& ~1UL);
802 /* This walks the list of robust futexes releasing them. */
804 release_futexes(struct proc
*p
)
806 struct linux_robust_list_head
*head
= NULL
;
807 struct linux_robust_list
*entry
, *next_entry
, *pending
;
808 unsigned int limit
= 2048, pi
, next_pi
, pip
;
809 struct linux_emuldata
*em
;
815 em
= emuldata_get(p
);
816 KKASSERT(em
!= NULL
);
817 head
= em
->robust_futexes
;
823 if (fetch_robust_entry(&entry
, PTRIN(&head
->list
.next
), &pi
))
826 if (copyin(&head
->futex_offset
, &futex_offset
, sizeof(futex_offset
)))
829 if (fetch_robust_entry(&pending
, PTRIN(&head
->pending_list
), &pip
))
832 while (entry
!= &head
->list
) {
833 rc
= fetch_robust_entry(&next_entry
, PTRIN(&entry
->next
), &next_pi
);
835 if (entry
!= pending
)
836 if (handle_futex_death(p
, (uint32_t *)entry
+ futex_offset
, pi
))
848 /* XXX: not sure about this yield, was sched_relinquish(curthread); */
849 lwkt_deschedule(curthread
);
855 handle_futex_death(p
, (uint32_t *)pending
+ futex_offset
, pip
);