/*	$NetBSD: linux_futex.c,v 1.7 2006/07/24 19:01:49 manu Exp $ */

/*-
 * Copyright (c) 2005 Emmanuel Dreyfus, all rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Emmanuel Dreyfus
 * 4. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/imgact_aout.h>
#include <sys/imgact_elf.h>
#include <sys/kern_syscall.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/priv.h>
#include <sys/spinlock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>
#include <sys/exec.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/cpu.h>
#include <machine/limits.h>

#include "i386/linux.h"
#include "i386/linux_proto.h"
#include "linux_signal.h"
#include "linux_util.h"
#include "linux_emuldata.h"

MALLOC_DEFINE(M_FUTEX, "futex", "Linux futexes");
MALLOC_DEFINE(M_FUTEX_WP, "futex wp", "Linux futexes wp");

struct futex;

struct waiting_proc {
	uint32_t	wp_flags;
	struct futex	*wp_futex;
	TAILQ_ENTRY(waiting_proc) wp_list;
};

struct futex {
	struct lock	f_lck;
	uint32_t	*f_uaddr;
	uint32_t	f_refcount;
	LIST_ENTRY(futex) f_list;
	TAILQ_HEAD(lf_waiting_proc, waiting_proc) f_waiting_proc;
};

struct futex_list futex_list;

#if 0
#define FUTEX_LOCK(f)		spin_lock_wr(&(f)->f_lck)
#define FUTEX_UNLOCK(f)		spin_unlock_wr(&(f)->f_lck)
#define FUTEX_INIT(f)		spin_init(&(f)->f_lck)
#define FUTEX_SLEEP(f, id, flag, wm, timo)	ssleep((id), &(f)->f_lck, (flag), (wm), (timo))
#endif

#define FUTEX_LOCK(f)		lockmgr(&(f)->f_lck, LK_EXCLUSIVE)
#define FUTEX_UNLOCK(f)		lockmgr(&(f)->f_lck, LK_RELEASE)
#define FUTEX_INIT(f)		lockinit(&(f)->f_lck, "ftlk", 0, LK_CANRECURSE)
#define FUTEX_DESTROY(f)	lockuninit(&(f)->f_lck)
#define FUTEX_ASSERT_LOCKED(f)	KKASSERT(lockstatus(&(f)->f_lck, curthread) == LK_EXCLUSIVE)
#define FUTEX_SLEEP(f, id, flag, wm, timo)	lksleep((id), &(f)->f_lck, (flag), (wm), (timo))

struct lock futex_mtx;		/* protects the futex list */
#define FUTEXES_LOCK		lockmgr(&futex_mtx, LK_EXCLUSIVE)
#define FUTEXES_UNLOCK		lockmgr(&futex_mtx, LK_RELEASE)

/* Debug magic to take advantage of freebsd's mess */
#if LINUX_DEBUG
#define LINUX_CTR_PREFIX
#else
#define LINUX_CTR_PREFIX	while (0)
#endif

#define LINUX_CTR1(a,b,c)		LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c)
#define LINUX_CTR2(a,b,c,d)		LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d)
#define LINUX_CTR3(a,b,c,d,e)		LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e)
#define LINUX_CTR4(a,b,c,d,e,f)		LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e,f)
#define LINUX_CTR5(a,b,c,d,e,f,g)	LINUX_CTR_PREFIX kprintf("linux_futex: " b "\n",c,d,e,f,g)

/* flags for futex_get() */
#define FUTEX_CREATE_WP		0x1	/* create a waiting_proc */
#define FUTEX_DONTCREATE	0x2	/* don't create the futex if it doesn't exist */
#define FUTEX_DONTEXISTS	0x4	/* return EINVAL if the futex already exists */

/* wp_flags */
#define FUTEX_WP_REQUEUED	0x1	/* wp requeued - wp moved from the wp_list
					 * of the futex where the thread sleeps to
					 * the wp_list of another futex.
					 */
#define FUTEX_WP_REMOVED	0x2	/* wp is woken up and removed from the futex
					 * wp_list to prevent a double wakeup.
					 */

/* support.s */
int futex_xchgl(int oparg, uint32_t *uaddr, int *oldval);
int futex_addl(int oparg, uint32_t *uaddr, int *oldval);
int futex_orl(int oparg, uint32_t *uaddr, int *oldval);
int futex_andl(int oparg, uint32_t *uaddr, int *oldval);
int futex_xorl(int oparg, uint32_t *uaddr, int *oldval);
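
/*
 * Drop a reference on a futex.  The futex must be locked on entry; the
 * lock is always released.  If wp is non-NULL it is removed from the
 * futex's wait queue (unless a waker already removed it) and freed.
 * When the reference count drops to zero the futex is unlinked from
 * futex_list and destroyed.
 */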
static void
futex_put(struct futex *f, struct waiting_proc *wp)
{
	FUTEX_ASSERT_LOCKED(f);
	if (wp != NULL) {
		if ((wp->wp_flags & FUTEX_WP_REMOVED) == 0)
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		kfree(wp, M_FUTEX_WP);
	}

	FUTEXES_LOCK;
	if (--f->f_refcount == 0) {
		LIST_REMOVE(f, f_list);
		FUTEXES_UNLOCK;
		FUTEX_UNLOCK(f);

		LINUX_CTR2(sys_futex, "futex_put destroy uaddr %p ref %d",
		    f->f_uaddr, f->f_refcount);
		FUTEX_DESTROY(f);
		kfree(f, M_FUTEX);
		return;
	}

	LINUX_CTR2(sys_futex, "futex_put uaddr %p ref %d",
	    f->f_uaddr, f->f_refcount);
	FUTEXES_UNLOCK;
	FUTEX_UNLOCK(f);
}
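
/*
 * Look up (or, unless FUTEX_DONTCREATE is set, create) the futex for a
 * given userland address.  On success *newf points at the futex with one
 * reference held and its lock owned by the caller; with FUTEX_DONTCREATE
 * *newf may be NULL.  FUTEX_DONTEXISTS makes the lookup fail with EINVAL
 * if a futex for uaddr already exists.
 */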
static int
futex_get0(uint32_t *uaddr, struct futex **newf, uint32_t flags)
{
	struct futex *f, *tmpf;

	*newf = tmpf = NULL;

retry:
	FUTEXES_LOCK;
	LIST_FOREACH(f, &futex_list, f_list) {
		if (f->f_uaddr == uaddr) {
			if (tmpf != NULL) {
				FUTEX_UNLOCK(tmpf);
				FUTEX_DESTROY(tmpf);
				kfree(tmpf, M_FUTEX);
			}
			if (flags & FUTEX_DONTEXISTS) {
				FUTEXES_UNLOCK;
				return (EINVAL);
			}

			/*
			 * Increment the refcount of the found futex to
			 * prevent it from being deallocated before
			 * FUTEX_LOCK().
			 */
			++f->f_refcount;
			FUTEXES_UNLOCK;

			FUTEX_LOCK(f);
			*newf = f;
			LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d",
			    uaddr, f->f_refcount);
			return (0);
		}
	}

	if (flags & FUTEX_DONTCREATE) {
		FUTEXES_UNLOCK;
		LINUX_CTR1(sys_futex, "futex_get uaddr %p null", uaddr);
		return (0);
	}

	if (tmpf == NULL) {
		FUTEXES_UNLOCK;
		tmpf = kmalloc(sizeof(*tmpf), M_FUTEX, M_WAITOK | M_ZERO);
		tmpf->f_uaddr = uaddr;
		tmpf->f_refcount = 1;
		FUTEX_INIT(tmpf);
		TAILQ_INIT(&tmpf->f_waiting_proc);

		/*
		 * Lock the new futex before inserting it into the
		 * futex_list to prevent its use by others.
		 */
		FUTEX_LOCK(tmpf);
		goto retry;
	}

	LIST_INSERT_HEAD(&futex_list, tmpf, f_list);
	FUTEXES_UNLOCK;

	LINUX_CTR2(sys_futex, "futex_get uaddr %p ref %d new",
	    uaddr, tmpf->f_refcount);
	*newf = tmpf;
	return (0);
}
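
/*
 * Wrapper around futex_get0().  With FUTEX_CREATE_WP a waiting_proc is
 * allocated, linked onto the futex's wait queue and returned through *wp;
 * it is released again if the lookup fails.
 */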
static int
futex_get(uint32_t *uaddr, struct waiting_proc **wp, struct futex **f,
    uint32_t flags)
{
	int error;

	if (flags & FUTEX_CREATE_WP) {
		*wp = kmalloc(sizeof(struct waiting_proc), M_FUTEX_WP, M_WAITOK);
		(*wp)->wp_flags = 0;
	}
	error = futex_get0(uaddr, f, flags);
	if (error) {
		if (flags & FUTEX_CREATE_WP) {
			kfree(*wp, M_FUTEX_WP);
			*wp = NULL;
		}
		return (error);
	}
	if (flags & FUTEX_CREATE_WP) {
		TAILQ_INSERT_HEAD(&(*f)->f_waiting_proc, *wp, wp_list);
		(*wp)->wp_futex = *f;
	}

	return (error);
}
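
/*
 * Sleep on a locked futex until woken, requeued or timed out.  If the
 * waiter was requeued while asleep, the reference on the original futex
 * is dropped and the futex it was moved to is locked instead.  The futex
 * reference and the waiting_proc are released before returning.
 */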
static int
futex_sleep(struct futex *f, struct waiting_proc *wp, unsigned long timeout)
{
	int error;

	FUTEX_ASSERT_LOCKED(f);
	LINUX_CTR4(sys_futex, "futex_sleep enter uaddr %p wp %p timo %ld ref %d",
	    f->f_uaddr, wp, timeout, f->f_refcount);
	error = FUTEX_SLEEP(f, wp, PCATCH, "futex", timeout);
#if 0
	error = ssleep(wp, &f->f_lck, PCATCH, "futex", timeout);
	error = sx_sleep(wp, &f->f_lck, PCATCH, "futex", timeout);
#endif
	if (wp->wp_flags & FUTEX_WP_REQUEUED) {
		KASSERT(f != wp->wp_futex, ("futex != wp_futex"));
		LINUX_CTR5(sys_futex, "futex_sleep out error %d uaddr %p wp"
		    " %p requeued uaddr %p ref %d",
		    error, f->f_uaddr, wp, wp->wp_futex->f_uaddr,
		    wp->wp_futex->f_refcount);
		futex_put(f, NULL);
		f = wp->wp_futex;
		FUTEX_LOCK(f);
	}

	futex_put(f, wp);
	return (error);
}
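
/*
 * Wake up to n threads sleeping on the futex.  Woken waiters are marked
 * FUTEX_WP_REMOVED and taken off the wait queue so they cannot be woken
 * twice.  Returns the number of threads woken.
 */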
static int
futex_wake(struct futex *f, int n)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	TAILQ_FOREACH_MUTABLE(wp, &f->f_waiting_proc, wp_list, wpt) {
		LINUX_CTR3(sys_futex, "futex_wake uaddr %p wp %p ref %d",
		    f->f_uaddr, wp, f->f_refcount);
		wp->wp_flags |= FUTEX_WP_REMOVED;
		TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
		wakeup_one(wp);
		if (++count == n)
			break;
	}

	return (count);
}
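
/*
 * Wake up to n waiters on futex f and requeue up to n2 further waiters
 * onto futex f2.  Both futexes must be locked.  Returns the number of
 * waiters processed (woken plus requeued).
 */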
static int
futex_requeue(struct futex *f, int n, struct futex *f2, int n2)
{
	struct waiting_proc *wp, *wpt;
	int count = 0;

	FUTEX_ASSERT_LOCKED(f);
	FUTEX_ASSERT_LOCKED(f2);

	TAILQ_FOREACH_MUTABLE(wp, &f->f_waiting_proc, wp_list, wpt) {
		if (++count <= n) {
			LINUX_CTR2(sys_futex, "futex_req_wake uaddr %p wp %p",
			    f->f_uaddr, wp);
			wp->wp_flags |= FUTEX_WP_REMOVED;
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			wakeup_one(wp);
		} else {
			LINUX_CTR3(sys_futex, "futex_requeue uaddr %p wp %p to %p",
			    f->f_uaddr, wp, f2->f_uaddr);
			wp->wp_flags |= FUTEX_WP_REQUEUED;
			/* Move wp to the wp_list of the f2 futex. */
			TAILQ_REMOVE(&f->f_waiting_proc, wp, wp_list);
			TAILQ_INSERT_HEAD(&f2->f_waiting_proc, wp, wp_list);

			/*
			 * The thread sleeping on wp should acquire the f2
			 * lock after waking, so increment the refcount of
			 * f2 to prevent its premature deallocation.
			 */
			wp->wp_futex = f2;
			FUTEXES_LOCK;
			++f2->f_refcount;
			FUTEXES_UNLOCK;
			if (count - n >= n2)
				break;
		}
	}

	return (count);
}
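
/*
 * Wait on a futex with an optional timeout given as a Linux timespec
 * copied in from userland.  A NULL or zero timespec means wait
 * indefinitely; EWOULDBLOCK from the sleep is translated to ETIMEDOUT.
 */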
static int
futex_wait(struct futex *f, struct waiting_proc *wp, struct l_timespec *ts)
{
	struct l_timespec timeout = {0, 0};
	struct timeval tv = {0, 0};
	int timeout_hz;
	int error;

	if (ts != NULL) {
		error = copyin(ts, &timeout, sizeof(timeout));
		if (error)
			return (error);
	}

	tv.tv_usec = timeout.tv_sec * 1000000 + timeout.tv_nsec / 1000;
	timeout_hz = tvtohz_high(&tv);

	if (timeout.tv_sec == 0 && timeout.tv_nsec == 0)
		timeout_hz = 0;

	/*
	 * If the user process requests a non-null timeout,
	 * make sure we do not turn it into an infinite
	 * timeout because timeout_hz ends up being zero.
	 *
	 * We use a minimal timeout of 1/hz. Maybe it would
	 * make sense to just return ETIMEDOUT without sleeping.
	 */
	if (((timeout.tv_sec != 0) || (timeout.tv_nsec != 0)) &&
	    (timeout_hz == 0))
		timeout_hz = 1;

	error = futex_sleep(f, wp, timeout_hz);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;

	return (error);
}
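
/*
 * Decode and execute a FUTEX_WAKE_OP encoded operation: apply the
 * arithmetic/bitwise op to the 32-bit word at uaddr (via the assembler
 * helpers in support.s) and evaluate the comparison against the old
 * value.  Returns the boolean comparison result, or a negated errno
 * on failure.
 */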
static int
futex_atomic_op(struct proc *p, int encoded_op, uint32_t *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval = 0, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef DEBUG
	if (ldebug(sys_futex))
		kprintf("futex_atomic_op: op = %d, cmp = %d, oparg = %x, "
		    "cmparg = %x, uaddr = %p\n",
		    op, cmp, oparg, cmparg, uaddr);
#endif
	/* XXX: linux verifies access here and returns EFAULT */

	switch (op) {
	case FUTEX_OP_SET:
		ret = futex_xchgl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ADD:
		ret = futex_addl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_OR:
		ret = futex_orl(oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_ANDN:
		ret = futex_andl(~oparg, uaddr, &oldval);
		break;
	case FUTEX_OP_XOR:
		ret = futex_xorl(oparg, uaddr, &oldval);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		return (ret);

	switch (cmp) {
	case FUTEX_OP_CMP_EQ:
		return (oldval == cmparg);
	case FUTEX_OP_CMP_NE:
		return (oldval != cmparg);
	case FUTEX_OP_CMP_LT:
		return (oldval < cmparg);
	case FUTEX_OP_CMP_GE:
		return (oldval >= cmparg);
	case FUTEX_OP_CMP_LE:
		return (oldval <= cmparg);
	case FUTEX_OP_CMP_GT:
		return (oldval > cmparg);
	default:
		return (-ENOSYS);
	}
}
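
/*
 * futex(2) system call entry point for the Linux emulation layer.
 */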
int
sys_linux_sys_futex(struct linux_sys_futex_args *args)
{
	int op_ret, val, ret, nrwake;
	struct waiting_proc *wp;
	struct futex *f, *f2;
	int error = 0;

	/*
	 * Our implementation provides only private futexes.  Most
	 * applications should use private futexes but don't claim so.
	 * Therefore we treat all futexes as private by clearing the
	 * FUTEX_PRIVATE_FLAG.  This works in most cases (i.e. when futexes
	 * are not shared through a file descriptor or between different
	 * processes).
	 */
	args->op = (args->op & ~LINUX_FUTEX_PRIVATE_FLAG);

	switch (args->op) {
	case LINUX_FUTEX_WAIT:
		LINUX_CTR2(sys_futex, "WAIT val %d uaddr %p",
		    args->val, args->uaddr);
#ifdef DEBUG
		if (ldebug(sys_futex))
			kprintf(ARGS(sys_futex, "futex_wait val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, &wp, &f, FUTEX_CREATE_WP);
		if (error)
			return (error);
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "WAIT copyin failed %d",
			    error);
			futex_put(f, wp);
			return (error);
		}
		if (val != args->val) {
			LINUX_CTR3(sys_futex, "WAIT uaddr %p val %d != uval %d",
			    args->uaddr, args->val, val);
			futex_put(f, wp);
			return (EWOULDBLOCK);
		}

		error = futex_wait(f, wp, args->timeout);
		break;

	case LINUX_FUTEX_WAKE:
		LINUX_CTR2(sys_futex, "WAKE val %d uaddr %p",
		    args->val, args->uaddr);

		/*
		 * XXX: Linux is able to cope with different addresses
		 * corresponding to the same mapped memory in the sleeping
		 * and waker process(es).
		 */
#ifdef DEBUG
		if (ldebug(sys_futex))
			kprintf(ARGS(sys_futex, "futex_wake val %d uaddr %p"),
			    args->val, args->uaddr);
#endif
		error = futex_get(args->uaddr, NULL, &f, FUTEX_DONTCREATE);
		if (error)
			return (error);
		if (f == NULL) {
			args->sysmsg_iresult = 0;
			return (error);
		}
		args->sysmsg_iresult = futex_wake(f, args->val);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_CMP_REQUEUE:
		LINUX_CTR5(sys_futex, "CMP_REQUEUE uaddr %p "
		    "val %d val3 %d uaddr2 %p val2 %d",
		    args->uaddr, args->val, args->val3, args->uaddr2,
		    (int)(unsigned long)args->timeout);

#ifdef DEBUG
		if (ldebug(sys_futex))
			kprintf(ARGS(sys_futex, "futex_cmp_requeue uaddr %p "
			    "val %d val3 %d uaddr2 %p val2 %d"),
			    args->uaddr, args->val, args->val3, args->uaddr2,
			    (int)(unsigned long)args->timeout);
#endif

		/*
		 * Linux allows this; we do not, because it is incorrect
		 * usage of the declared ABI, so return EINVAL.
		 */
		if (args->uaddr == args->uaddr2)
			return (EINVAL);
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);

		/*
		 * To avoid deadlocks return EINVAL if the second futex
		 * already exists at this time.  Otherwise create the new
		 * futex and ignore the false-positive LOR which thus
		 * happens.
		 *
		 * Glibc falls back to FUTEX_WAKE in case of any error
		 * returned by FUTEX_CMP_REQUEUE.
		 */
		error = futex_get0(args->uaddr2, &f2, FUTEX_DONTEXISTS);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}
		error = copyin(args->uaddr, &val, sizeof(val));
		if (error) {
			LINUX_CTR1(sys_futex, "CMP_REQUEUE copyin failed %d",
			    error);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (error);
		}
		if (val != args->val3) {
			LINUX_CTR2(sys_futex, "CMP_REQUEUE val %d != uval %d",
			    args->val, val);
			futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EAGAIN);
		}

		nrwake = (int)(unsigned long)args->timeout;
		args->sysmsg_iresult = futex_requeue(f, args->val, f2, nrwake);
		futex_put(f2, NULL);
		futex_put(f, NULL);
		break;

	case LINUX_FUTEX_WAKE_OP:
		LINUX_CTR5(sys_futex, "WAKE_OP "
		    "uaddr %p op %d val %x uaddr2 %p val3 %x",
		    args->uaddr, args->op, args->val,
		    args->uaddr2, args->val3);

#ifdef DEBUG
		if (ldebug(sys_futex))
			kprintf(ARGS(sys_futex, "futex_wake_op "
			    "uaddr %p op %d val %x uaddr2 %p val3 %x"),
			    args->uaddr, args->op, args->val,
			    args->uaddr2, args->val3);
#endif
		error = futex_get0(args->uaddr, &f, 0);
		if (error)
			return (error);
		/*
		 * f2 stays NULL when both addresses are the same, so the
		 * f2 != NULL checks below are well defined.
		 */
		f2 = NULL;
		if (args->uaddr != args->uaddr2)
			error = futex_get0(args->uaddr2, &f2, 0);
		if (error) {
			futex_put(f, NULL);
			return (error);
		}

		/*
		 * This function returns a positive number as its result
		 * and a negative number on error.
		 */
		op_ret = futex_atomic_op(curproc, args->val3, args->uaddr2);

		if (op_ret < 0) {
			/* XXX: We don't handle the EFAULT yet. */
			if (op_ret != -EFAULT) {
				if (f2 != NULL)
					futex_put(f2, NULL);
				futex_put(f, NULL);
				return (-op_ret);
			}
			if (f2 != NULL)
				futex_put(f2, NULL);
			futex_put(f, NULL);
			return (EFAULT);
		}

		ret = futex_wake(f, args->val);

		if (op_ret > 0) {
			op_ret = 0;
			nrwake = (int)(unsigned long)args->timeout;

			if (f2 != NULL)
				op_ret += futex_wake(f2, nrwake);
			else
				op_ret += futex_wake(f, nrwake);
			ret += op_ret;
		}

		if (f2 != NULL)
			futex_put(f2, NULL);
		futex_put(f, NULL);
		args->sysmsg_iresult = ret;
		break;

	case LINUX_FUTEX_LOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_UNLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_TRYLOCK_PI:
		/* not yet implemented */
		return (ENOSYS);

	case LINUX_FUTEX_REQUEUE:
		/*
		 * Glibc does not use this operation since version 2.3.3,
		 * as it is racy and was replaced by the FUTEX_CMP_REQUEUE
		 * operation.  Glibc versions prior to 2.3.3 fall back to
		 * FUTEX_WAKE when FUTEX_REQUEUE returns EINVAL.
		 */
		return (EINVAL);

	default:
		kprintf("linux_sys_futex: unknown op %d\n", args->op);
		return (ENOSYS);
	}

	return (error);
}
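
/*
 * set_robust_list(2): register the caller's robust futex list head with
 * the emulation layer.
 */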
int
sys_linux_set_robust_list(struct linux_set_robust_list_args *args)
{
#ifdef DEBUG
	if (ldebug(set_robust_list))
		kprintf(ARGS(set_robust_list, "head %p len %d"),
		    args->head, args->len);
#endif

	if (args->len != sizeof(struct linux_robust_list_head))
		return (EINVAL);

	emuldata_set_robust(curproc, args->head);

	return (0);
}
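
/*
 * get_robust_list(2): copy out the robust futex list head and its size
 * for the calling process or, after a privilege check, for the process
 * identified by pid.
 */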
int
sys_linux_get_robust_list(struct linux_get_robust_list_args *args)
{
	struct linux_emuldata *em;
	struct linux_robust_list_head empty_head;
	struct linux_robust_list_head *head;
	l_size_t len = sizeof(struct linux_robust_list_head);
	int error = 0;

#ifdef DEBUG
	if (ldebug(get_robust_list))
		kprintf(ARGS(get_robust_list, ""));
#endif
	EMUL_LOCK();
	if (args->pid == 0) {
		em = emuldata_get(curproc);
		KKASSERT(em != NULL);
		if (em->robust_futexes == NULL) {
			bzero(&empty_head, sizeof(empty_head));
			head = &empty_head;
		} else {
			head = em->robust_futexes;
		}
	} else {
		struct proc *p;

		p = pfind(args->pid);
		if (p == NULL) {
			EMUL_UNLOCK();
			return (ESRCH);
		}

		em = emuldata_get(p);
		/* XXX: ptrace? p_candebug? */
		if (priv_check(curthread, PRIV_CRED_SETUID) ||
		    priv_check(curthread, PRIV_CRED_SETEUID)/* ||
		    p_candebug(curproc, p) */) {
			EMUL_UNLOCK();
			return (EPERM);
		}
		head = em->robust_futexes;
	}
	EMUL_UNLOCK();

	error = copyout(&len, args->len, sizeof(l_size_t));
	if (error)
		return (EFAULT);

	error = copyout(head, args->head, sizeof(struct linux_robust_list_head));

	return (error);
}
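
/*
 * Handle a single entry of a dying process' robust futex list: if the
 * futex word is still owned by the exiting process, mark it
 * FUTEX_OWNER_DIED and wake one waiter so the lock can be recovered.
 */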
static int
handle_futex_death(struct proc *p, uint32_t *uaddr, int pi)
{
	uint32_t uval, nval, mval;
	struct futex *f;
	int error;

retry:
	if (copyin(uaddr, &uval, 4))
		return (EFAULT);
	if ((uval & FUTEX_TID_MASK) == p->p_pid) {
		mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
		nval = casuword((ulong *)uaddr, uval, mval);

		if (nval == -1)
			return (EFAULT);

		if (nval != uval)
			goto retry;

		if (!pi && (uval & FUTEX_WAITERS)) {
			error = futex_get(uaddr, NULL, &f,
			    FUTEX_DONTCREATE);
			if (error)
				return (error);
			if (f != NULL) {
				futex_wake(f, 1);
				futex_put(f, NULL);
			}
		}
	}

	return (0);
}
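
/*
 * Fetch one robust-list pointer from userland.  The low bit of the
 * stored value flags a PI futex and is returned separately via *pi.
 */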
static int
fetch_robust_entry(struct linux_robust_list **entry,
    struct linux_robust_list **head, int *pi)
{
	l_ulong uentry;

	if (copyin((const void *)head, &uentry, sizeof(l_ulong)))
		return (EFAULT);

	*entry = (void *)(uentry & ~1UL);
	*pi = uentry & 1;

	return (0);
}

/* This walks the list of robust futexes, releasing them. */
void
release_futexes(struct proc *p)
{
	struct linux_robust_list_head *head = NULL;
	struct linux_robust_list *entry, *next_entry, *pending;
	unsigned int limit = 2048, pi, next_pi, pip;
	struct linux_emuldata *em;
	l_long futex_offset;
	int rc;

	EMUL_LOCK();
	KKASSERT(p != NULL);
	em = emuldata_get(p);
	KKASSERT(em != NULL);
	head = em->robust_futexes;
	EMUL_UNLOCK();

	if (head == NULL)
		return;

	if (fetch_robust_entry(&entry, PTRIN(&head->list.next), &pi))
		return;

	if (copyin(&head->futex_offset, &futex_offset, sizeof(futex_offset)))
		return;

	if (fetch_robust_entry(&pending, PTRIN(&head->pending_list), &pip))
		return;

	while (entry != &head->list) {
		rc = fetch_robust_entry(&next_entry, PTRIN(&entry->next), &next_pi);

		/* futex_offset is a byte offset in the Linux robust-list ABI */
		if (entry != pending)
			if (handle_futex_death(p,
			    (uint32_t *)((caddr_t)entry + futex_offset), pi))
				return;
		if (rc)
			return;

		entry = next_entry;
		pi = next_pi;

		if (!--limit)
			break;

#if 0
		/* XXX: not sure about this yield, was sched_relinquish(curthread); */
		lwkt_deschedule(curthread);
		lwkt_yield();
#endif
	}

	if (pending)
		handle_futex_death(p,
		    (uint32_t *)((caddr_t)pending + futex_offset), pip);
}