/*-
 * Copyright (c) 2000 Marcel Moolenaar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/linux/linux_machdep.c,v 1.6.2.4 2001/11/05 19:08:23 marcel Exp $
 * $DragonFly: src/sys/emulation/linux/i386/linux_machdep.c,v 1.23 2007/07/30 17:41:23 pavalos Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/imgact.h>
#include <sys/kern_syscall.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/nlookup.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/ptrace.h>
#include <sys/sysproto.h>
#include <sys/thread2.h>
#include <sys/unistd.h>
#include <sys/wait.h>

#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

#include <sys/mplock2.h>

#include "linux.h"
#include "linux_proto.h"
#include "../linux_ipc.h"
#include "../linux_signal.h"
#include "../linux_util.h"
#include "../linux_emuldata.h"

struct l_descriptor {
	l_uint		entry_number;
	l_ulong		base_addr;
	l_uint		limit;
	l_uint		seg_32bit:1;
	l_uint		contents:2;
	l_uint		read_exec_only:1;
	l_uint		limit_in_pages:1;
	l_uint		seg_not_present:1;
	l_uint		useable:1;
};

struct l_old_select_argv {
	l_int		nfds;
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval *timeout;
};

int
linux_to_bsd_sigaltstack(int lsa)
{
	int bsa = 0;

	if (lsa & LINUX_SS_DISABLE)
		bsa |= SS_DISABLE;
	if (lsa & LINUX_SS_ONSTACK)
		bsa |= SS_ONSTACK;
	return (bsa);
}

int
bsd_to_linux_sigaltstack(int bsa)
{
	int lsa = 0;

	if (bsa & SS_DISABLE)
		lsa |= LINUX_SS_DISABLE;
	if (bsa & SS_ONSTACK)
		lsa |= LINUX_SS_ONSTACK;
	return (lsa);
}

/*
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return(error);
}

struct l_ipc_kludge {
	struct l_msgbuf *msgp;
	l_long msgtyp;
};
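
/*
 * Linux multiplexes the System V IPC system calls (semop, semget, msgsnd,
 * shmat, ...) through a single ipc(2) entry point.  sys_linux_ipc()
 * demultiplexes on the low 16 bits of args->what and hands each request
 * to the corresponding linux_* wrapper, copying the result back into
 * sysmsg_lresult.  The high 16 bits carry a version field used by the
 * old-style msgrcv path below.
 */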

/*
 * MPALMOSTSAFE
 */
int
sys_linux_ipc(struct linux_ipc_args *args)
{
	int error = 0;

	get_mplock();

	switch (args->what & 0xFFFF) {
	case LINUX_SEMOP: {
		struct linux_semop_args a;

		a.semid = args->arg1;
		a.tsops = args->ptr;
		a.nsops = args->arg2;
		a.sysmsg_lresult = 0;
		error = linux_semop(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SEMGET: {
		struct linux_semget_args a;

		a.key = args->arg1;
		a.nsems = args->arg2;
		a.semflg = args->arg3;
		a.sysmsg_lresult = 0;
		error = linux_semget(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SEMCTL: {
		struct linux_semctl_args a;

		a.semid = args->arg1;
		a.semnum = args->arg2;
		a.cmd = args->arg3;
		a.sysmsg_lresult = 0;
		error = copyin((caddr_t)args->ptr, &a.arg, sizeof(a.arg));
		if (error)
			break;
		error = linux_semctl(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_MSGSND: {
		struct linux_msgsnd_args a;

		a.msqid = args->arg1;
		a.msgp = args->ptr;
		a.msgsz = args->arg2;
		a.msgflg = args->arg3;
		a.sysmsg_lresult = 0;
		error = linux_msgsnd(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_MSGRCV: {
		struct linux_msgrcv_args a;

		a.msqid = args->arg1;
		a.msgsz = args->arg2;
		if (a.msgsz < 0) {
			error = EINVAL;
			break;
		}
		a.msgflg = args->arg3;
		a.sysmsg_lresult = 0;
		if ((args->what >> 16) == 0) {
			struct l_ipc_kludge tmp;

			if (args->ptr == NULL) {
				error = EINVAL;
				break;
			}
			error = copyin((caddr_t)args->ptr, &tmp, sizeof(tmp));
			if (error)
				break;
			a.msgp = tmp.msgp;
			a.msgtyp = tmp.msgtyp;
		} else {
			a.msgp = args->ptr;
			a.msgtyp = args->arg5;
		}
		error = linux_msgrcv(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_MSGGET: {
		struct linux_msgget_args a;

		a.key = args->arg1;
		a.msgflg = args->arg2;
		a.sysmsg_lresult = 0;
		error = linux_msgget(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_MSGCTL: {
		struct linux_msgctl_args a;

		a.msqid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		a.sysmsg_lresult = 0;
		error = linux_msgctl(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SHMAT: {
		struct linux_shmat_args a;

		a.shmid = args->arg1;
		a.shmaddr = args->ptr;
		a.shmflg = args->arg2;
		a.raddr = (l_ulong *)args->arg3;
		a.sysmsg_lresult = 0;
		error = linux_shmat(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SHMDT: {
		struct linux_shmdt_args a;

		a.shmaddr = args->ptr;
		a.sysmsg_lresult = 0;
		error = linux_shmdt(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SHMGET: {
		struct linux_shmget_args a;

		a.key = args->arg1;
		a.size = args->arg2;
		a.shmflg = args->arg3;
		a.sysmsg_lresult = 0;
		error = linux_shmget(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	case LINUX_SHMCTL: {
		struct linux_shmctl_args a;

		a.shmid = args->arg1;
		a.cmd = args->arg2;
		a.buf = args->ptr;
		a.sysmsg_lresult = 0;
		error = linux_shmctl(&a);
		args->sysmsg_lresult = a.sysmsg_lresult;
		break;
	}
	default:
		error = EINVAL;
		break;
	}
	rel_mplock();
	return(error);
}
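
/*
 * Old-style Linux select(2) passes a single pointer to a block of
 * arguments (struct l_old_select_argv); unpack it and forward the call
 * to the regular linux select implementation.
 */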

/*
 * MPSAFE
 */
int
sys_linux_old_select(struct linux_old_select_args *args)
{
	struct l_old_select_argv linux_args;
	struct linux_select_args newsel;
	int error;

#ifdef DEBUG
	if (ldebug(old_select))
		kprintf(ARGS(old_select, "%p"), args->ptr);
#endif

	error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

	newsel.sysmsg_iresult = 0;
	newsel.nfds = linux_args.nfds;
	newsel.readfds = linux_args.readfds;
	newsel.writefds = linux_args.writefds;
	newsel.exceptfds = linux_args.exceptfds;
	newsel.timeout = linux_args.timeout;
	error = sys_linux_select(&newsel);
	args->sysmsg_iresult = newsel.sysmsg_iresult;
	return(error);
}

/*
 * MPSAFE
 */
int
sys_linux_fork(struct linux_fork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		emuldata_init(curproc, p2, 0);

		start_forked_proc(lp, p2);
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/* Are we the child? */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
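
/*
 * exit_group(2) must take down every process that shares this emulation
 * group, not just the caller.  Each sibling is marked EMUL_DIDKILL so it
 * is signalled only once, the shared status block records the exit code,
 * and the remaining members are sent SIGKILL before the caller itself
 * calls exit1().
 */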

/*
 * MPALMOSTSAFE
 */
int
sys_linux_exit_group(struct linux_exit_group_args *args)
{
	struct linux_emuldata *em, *e;
	int rval;

	rval = args->rval;
	EMUL_LOCK();

	em = emuldata_get(curproc);

	if (em->s->refs == 1) {
		EMUL_UNLOCK();
		exit1(W_EXITCODE(rval, 0));
		/* NOTREACHED */
		return (0);
	}
	KKASSERT(em->proc == curproc);
	em->flags |= EMUL_DIDKILL;
	em->s->flags |= LINUX_LES_INEXITGROUP;
	em->s->xstat = W_EXITCODE(rval, 0);

	LIST_REMOVE(em, threads);
	LIST_INSERT_HEAD(&em->s->threads, em, threads);

	while ((e = LIST_NEXT(em, threads)) != NULL) {
		LIST_REMOVE(em, threads);
		LIST_INSERT_AFTER(e, em, threads);
		if ((e->flags & EMUL_DIDKILL) == 0) {
			e->flags |= EMUL_DIDKILL;
			KKASSERT(pfind(e->proc->p_pid) == e->proc);
			get_mplock();
			ksignal(e->proc, SIGKILL);
			rel_mplock();
		}
	}

	EMUL_UNLOCK();
	exit1(W_EXITCODE(rval, 0));
	/* NOTREACHED */

	return (0);
}

/*
 * MPSAFE
 */
int
sys_linux_vfork(struct linux_vfork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		emuldata_init(curproc, p2, 0);

		start_forked_proc(lp, p2);
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/* Are we the child? */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
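
/*
 * clone(2): the low 8 bits of the flags word select the signal delivered
 * to the parent on exit; the remaining bits are translated into native
 * fork1() RF* flags (CLONE_VM -> RFMEM, CLONE_SIGHAND -> RFSIGSHARE,
 * CLONE_VFORK -> RFPPWAIT, and the full pthread flag set -> RFTHREAD).
 * CLONE_SETTLS additionally loads a TLS descriptor for the child from
 * the register state (%esi) of the caller.
 */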

/*
 * MPALMOSTSAFE
 */
int
sys_linux_clone(struct linux_clone_args *args)
{
	struct segment_descriptor *desc;
	struct l_user_desc info;
	int idx;
	int a[2];

	struct lwp *lp = curthread->td_lwp;
	int error, ff = RFPROC;
	struct proc *p2 = NULL;
	int exit_signal;
	vm_offset_t start;

	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);
	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	if (args->flags & LINUX_CLONE_VM)
		ff |= RFMEM;
	if (args->flags & LINUX_CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
		ff |= RFFDG;
	if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
		ff |= RFTHREAD;
	if (args->flags & LINUX_CLONE_VFORK)
		ff |= RFPPWAIT;
	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL)
			return (EINVAL);
	}

	error = 0;
	start = 0;

	get_mplock();
	error = fork1(lp, ff | RFPGLOCK, &p2);
	if (error) {
		rel_mplock();
		return error;
	}

	args->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
	args->sysmsg_fds[1] = 0;

	if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD))
		proc_reparent(p2, curproc->p_pptr /* XXX */);

	emuldata_init(curproc, p2, args->flags);
	linux_proc_fork(p2, curproc, args->child_tidptr);
	/*
	 * XXX: this can't happen, p2 is never NULL, or else we'd have
	 * other problems, too (see p2->p_sigparent == ...,
	 * linux_proc_fork and emuldata_init).
	 */
	if (p2 == NULL) {
		error = ESRCH;
	} else {
		if (args->flags & LINUX_CLONE_PARENT_SETTID) {
			error = copyout(&p2->p_pid, args->parent_tidptr,
			    sizeof(p2->p_pid));
		}
	}

	p2->p_sigparent = exit_signal;
	if (args->stack) {
		ONLY_LWP_IN_PROC(p2)->lwp_md.md_regs->tf_esp =
		    (unsigned long)args->stack;
	}

	if (args->flags & LINUX_CLONE_SETTLS) {
		error = copyin((void *)curthread->td_lwp->lwp_md.md_regs->tf_esi, &info, sizeof(struct l_user_desc));
		if (error) {
			kprintf("copyin of tf_esi to info failed\n");
		} else {
			idx = info.entry_number;
			/*
			 * We understand both our own entries such as the ones
			 * we provide on linux_set_thread_area, as well as the
			 * linux-type entries 6-8.
			 */
			if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
				kprintf("LINUX_CLONE_SETTLS, invalid idx requested: %d\n", idx);
				goto out;
			}
			if (idx < GTLS_START) {
				idx -= 6;
			} else {
#if 0 /* was SMP */
				idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#endif
				idx -= GTLS_START;
			}
			KKASSERT(idx >= 0);

			a[0] = LINUX_LDT_entry_a(&info);
			a[1] = LINUX_LDT_entry_b(&info);
			if (p2) {
				desc = &FIRST_LWP_IN_PROC(p2)->lwp_thread->td_tls.tls[idx];
				memcpy(desc, &a, sizeof(a));
			} else {
				kprintf("linux_clone... we don't have a p2\n");
			}
		}
	}
out:
	if (p2)
		start_forked_proc(lp, p2);

	rel_mplock();
#ifdef DEBUG
	if (ldebug(clone))
		kprintf(LMSG("clone: successful rfork to %ld"),
		    (long)p2->p_pid);
#endif

	return (error);
}

/* XXX move */
struct l_mmap_argv {
	l_caddr_t	addr;
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;
};
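
/*
 * On Linux/i386 the old mmap(2) passes all six arguments indirectly
 * through a single pointer to this structure (see sys_linux_mmap below),
 * while mmap2(2) passes them in registers with a page-shifted offset.
 */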

#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)

/*
 * MPALMOSTSAFE
 */
static int
linux_mmap_common(caddr_t linux_addr, size_t linux_len, int linux_prot,
		  int linux_flags, int linux_fd, off_t pos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t addr;
	void *new;
	int error, flags, len, prot, fd;

	flags = 0;
	if (linux_flags & LINUX_MAP_SHARED)
		flags |= MAP_SHARED;
	if (linux_flags & LINUX_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (linux_flags & LINUX_MAP_FIXED)
		flags |= MAP_FIXED;
	if (linux_flags & LINUX_MAP_ANON) {
		flags |= MAP_ANON;
	} else {
		flags |= MAP_NOSYNC;
	}

	lwkt_gettoken(&vm_token);
	lwkt_gettoken(&vmspace_token);

	if (linux_flags & LINUX_MAP_GROWSDOWN) {
		flags |= MAP_STACK;
		/* The linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region.  Linux mmap with this option
		 * takes as addr the initial BOS, and as len, the initial
		 * region size.  It can then grow down from addr without
		 * limit.  However, linux threads have an implicit internal
		 * limit to stack size of STACK_SIZE.  It's just not
		 * enforced explicitly in linux.  But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region.  It then maps the top SGROWSIZ bytes,
		 * and autogrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		addr = linux_addr + linux_len;

		if (addr > p->p_vmspace->vm_maxsaddr) {
			/* Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space.  If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit.  To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    p->p_rlimit[RLIMIT_STACK].rlim_cur;
		}

		/* This gives us our maximum stack size */
		if (linux_len > STACK_SIZE - GUARD_SIZE) {
			len = linux_len;
		} else {
			len = STACK_SIZE - GUARD_SIZE;
		}
		/* This gives us a new BOS.  If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS.  If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		addr -= len;
	} else {
		addr = linux_addr;
		len = linux_len;
	}

	prot = linux_prot;

	if (prot & (PROT_READ | PROT_WRITE | PROT_EXEC))
		prot |= PROT_READ | PROT_EXEC;

	if (linux_flags & LINUX_MAP_ANON) {
		fd = -1;
	} else {
		fd = linux_fd;
	}

#ifdef DEBUG
	if (ldebug(mmap) || ldebug(mmap2))
		kprintf("-> (%p, %d, %d, 0x%08x, %d, %lld)\n",
		    addr, len, prot, flags, fd, pos);
#endif
	error = kern_mmap(curproc->p_vmspace, addr, len,
			  prot, flags, fd, pos, &new);

	lwkt_reltoken(&vmspace_token);
	lwkt_reltoken(&vm_token);

	if (error == 0)
		*res = new;
	return (error);
}

/*
 * MPSAFE
 */
int
sys_linux_mmap(struct linux_mmap_args *args)
{
	struct l_mmap_argv linux_args;
	int error;

	error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
	if (error)
		return (error);

#ifdef DEBUG
	if (ldebug(mmap))
		kprintf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)linux_args.addr, linux_args.len, linux_args.prot,
		    linux_args.flags, linux_args.fd, linux_args.pos);
#endif
	error = linux_mmap_common(linux_args.addr, linux_args.len,
	    linux_args.prot, linux_args.flags, linux_args.fd,
	    linux_args.pos, &args->sysmsg_resultp);
#ifdef DEBUG
	if (ldebug(mmap))
		kprintf("-> %p\n", args->sysmsg_resultp);
#endif
	return(error);
}

/*
 * MPSAFE
 */
int
sys_linux_mmap2(struct linux_mmap2_args *args)
{
	int error;

#ifdef DEBUG
	if (ldebug(mmap2))
		kprintf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
		    (void *)args->addr, args->len, args->prot, args->flags,
		    args->fd, args->pgoff);
#endif
	error = linux_mmap_common((void *)args->addr, args->len, args->prot,
	    args->flags, args->fd, args->pgoff * PAGE_SIZE,
	    &args->sysmsg_resultp);
#ifdef DEBUG
	if (ldebug(mmap2))
		kprintf("-> %p\n", args->sysmsg_resultp);
#endif
	return (error);
}
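
/*
 * Linux pipe(2) copies the two descriptors out to user memory and
 * returns 0, whereas the native sys_pipe() returns them in the two
 * register result slots.  Save and restore sysmsg_fds[1] (%edx) around
 * the call so the second return register is not clobbered.
 */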

/*
 * MPSAFE
 */
int
sys_linux_pipe(struct linux_pipe_args *args)
{
	int error;
	int reg_edx;
	struct pipe_args bsd_args;

#ifdef DEBUG
	if (ldebug(pipe))
		kprintf(ARGS(pipe, "*"));
#endif

	reg_edx = args->sysmsg_fds[1];
	error = sys_pipe(&bsd_args);
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	error = copyout(bsd_args.sysmsg_fds, args->pipefds, 2*sizeof(int));
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	args->sysmsg_fds[1] = reg_edx;
	args->sysmsg_fds[0] = 0;
	return (0);
}

/*
 * XXX: Preliminary
 */
int
sys_linux_pipe2(struct linux_pipe2_args *args)
{
	struct thread *td = curthread;
	int error;
	int reg_edx;
	struct pipe_args bsd_args;
	union fcntl_dat dat;

	reg_edx = args->sysmsg_fds[1];
	error = sys_pipe(&bsd_args);
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

//	if (args->flags & LINUX_O_CLOEXEC) {
//	}

	if (args->flags & LINUX_O_NONBLOCK) {
		dat.fc_flags = O_NONBLOCK;
		kern_fcntl(bsd_args.sysmsg_fds[0], F_SETFL, &dat, td->td_ucred);
		kern_fcntl(bsd_args.sysmsg_fds[1], F_SETFL, &dat, td->td_ucred);
	}

	error = copyout(bsd_args.sysmsg_fds, args->pipefds, 2*sizeof(int));
	if (error) {
		args->sysmsg_fds[1] = reg_edx;
		return (error);
	}

	args->sysmsg_fds[1] = reg_edx;
	args->sysmsg_fds[0] = 0;
	return (0);
}
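
/*
 * ioperm(2) is implemented by packaging the request as an
 * I386_SET_IOPERM sysarch(2) call; iopl(2) adjusts the IOPL field in
 * the caller's %eflags directly after the usual privilege checks.
 */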

/*
 * MPSAFE
 */
int
sys_linux_ioperm(struct linux_ioperm_args *args)
{
	struct sysarch_args sa;
	struct i386_ioperm_args *iia;
	caddr_t sg;
	int error;

	sg = stackgap_init();
	iia = stackgap_alloc(&sg, sizeof(struct i386_ioperm_args));
	iia->start = args->start;
	iia->length = args->length;
	iia->enable = args->enable;
	sa.sysmsg_resultp = NULL;
	sa.op = I386_SET_IOPERM;
	sa.parms = (char *)iia;
	error = sys_sysarch(&sa);
	args->sysmsg_resultp = sa.sysmsg_resultp;
	return(error);
}

/*
 * MPSAFE
 */
int
sys_linux_iopl(struct linux_iopl_args *args)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	int error;

	if (args->level < 0 || args->level > 3)
		return (EINVAL);
	if ((error = priv_check(td, PRIV_ROOT)) != 0)
		return (error);
	if (securelevel > 0)
		return (EPERM);
	lp->lwp_md.md_regs->tf_eflags =
	    (lp->lwp_md.md_regs->tf_eflags & ~PSL_IOPL) |
	    (args->level * (PSL_IOPL / 3));
	return (0);
}

/*
 * MPSAFE
 */
int
sys_linux_modify_ldt(struct linux_modify_ldt_args *uap)
{
	int error;
	caddr_t sg;
	struct sysarch_args args;
	struct i386_ldt_args *ldt;
	struct l_descriptor ld;
	union descriptor *desc;
	int size, written;

	sg = stackgap_init();

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		ldt->start = 0;
		ldt->descs = uap->ptr;
		ldt->num = uap->bytecount / sizeof(union descriptor);
		args.op = I386_GET_LDT;
		args.parms = (char*)ldt;
		args.sysmsg_iresult = 0;
		error = sys_sysarch(&args);
		uap->sysmsg_iresult = args.sysmsg_iresult *
				      sizeof(union descriptor);
		break;
	case 0x02: /* read_default_ldt = 0 */
		size = 5*sizeof(struct l_desc_struct);
		if (size > uap->bytecount)
			size = uap->bytecount;
		for (written = error = 0; written < size && error == 0; written++)
			error = subyte((char *)uap->ptr + written, 0);
		uap->sysmsg_iresult = written;
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		desc = stackgap_alloc(&sg, sizeof(*desc));
		ldt->start = ld.entry_number;
		ldt->descs = desc;
		ldt->num = 1;
		desc->sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc->sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc->sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc->sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc->sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc->sd.sd_dpl = 3;
		desc->sd.sd_p = (ld.seg_not_present ^ 1);
		desc->sd.sd_xx = 0;
		desc->sd.sd_def32 = ld.seg_32bit;
		desc->sd.sd_gran = ld.limit_in_pages;
		args.op = I386_SET_LDT;
		args.parms = (char*)ldt;
		args.sysmsg_iresult = 0;
		error = sys_sysarch(&args);
		uap->sysmsg_iresult = args.sysmsg_iresult;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
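
/*
 * Old-style sigaction(2): the Linux l_osigaction_t carries a single-word
 * signal mask, so it is widened into an l_sigaction_t before conversion
 * to the native struct sigaction, and narrowed again on the way out.
 */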

/*
 * MPALMOSTSAFE
 */
int
sys_linux_sigaction(struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t linux_act, linux_oact;
	struct sigaction act, oact;
	int error, sig;

#ifdef DEBUG
	if (ldebug(sigaction))
		kprintf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		linux_act.lsa_handler = osa.lsa_handler;
		linux_act.lsa_flags = osa.lsa_flags;
		linux_act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(linux_act.lsa_mask);
		linux_act.lsa_mask.__bits[0] = osa.lsa_mask;
		linux_to_bsd_sigaction(&linux_act, &act);
	}

	if (args->sig <= LINUX_SIGTBLSZ)
		sig = linux_to_bsd_signal[_SIG_IDX(args->sig)];
	else
		sig = args->sig;

	get_mplock();
	error = kern_sigaction(sig, args->nsa ? &act : NULL,
	    args->osa ? &oact : NULL);
	rel_mplock();

	if (args->osa != NULL && !error) {
		bsd_to_linux_sigaction(&oact, &linux_oact);
		osa.lsa_handler = linux_oact.lsa_handler;
		osa.lsa_flags = linux_oact.lsa_flags;
		osa.lsa_restorer = linux_oact.lsa_restorer;
		osa.lsa_mask = linux_oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}

	return (error);
}

/*
 * Linux has two extra args, restart and oldmask.  We don't use these,
 * but it seems that "restart" is actually a context pointer that
 * enables the signal to happen with a different register set.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_sigsuspend(struct linux_sigsuspend_args *args)
{
	l_sigset_t linux_mask;
	sigset_t mask;
	int error;

#ifdef DEBUG
	if (ldebug(sigsuspend))
		kprintf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
#endif

	LINUX_SIGEMPTYSET(linux_mask);
	linux_mask.__bits[0] = args->mask;
	linux_to_bsd_sigset(&linux_mask, &mask);

	get_mplock();
	error = kern_sigsuspend(&mask);
	rel_mplock();

	return(error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_linux_rt_sigsuspend(struct linux_rt_sigsuspend_args *uap)
{
	l_sigset_t linux_mask;
	sigset_t mask;
	int error;

#ifdef DEBUG
	if (ldebug(rt_sigsuspend))
		kprintf(ARGS(rt_sigsuspend, "%p, %d"),
		    (void *)uap->newset, uap->sigsetsize);
#endif

	if (uap->sigsetsize != sizeof(l_sigset_t))
		return (EINVAL);

	error = copyin(uap->newset, &linux_mask, sizeof(l_sigset_t));
	if (error)
		return (error);

	linux_to_bsd_sigset(&linux_mask, &mask);

	get_mplock();
	error = kern_sigsuspend(&mask);
	rel_mplock();

	return(error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_linux_pause(struct linux_pause_args *args)
{
	struct thread *td = curthread;
	struct lwp *lp = td->td_lwp;
	sigset_t mask;
	int error;

#ifdef DEBUG
	if (ldebug(pause))
		kprintf(ARGS(pause, ""));
#endif

	mask = lp->lwp_sigmask;

	get_mplock();
	error = kern_sigsuspend(&mask);
	rel_mplock();

	return(error);
}

/*
 * MPALMOSTSAFE
 */
int
sys_linux_sigaltstack(struct linux_sigaltstack_args *uap)
{
	stack_t ss, oss;
	l_stack_t linux_ss;
	int error;

#ifdef DEBUG
	if (ldebug(sigaltstack))
		kprintf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
#endif

	if (uap->uss) {
		error = copyin(uap->uss, &linux_ss, sizeof(l_stack_t));
		if (error)
			return (error);

		ss.ss_sp = linux_ss.ss_sp;
		ss.ss_size = linux_ss.ss_size;
		ss.ss_flags = linux_to_bsd_sigaltstack(linux_ss.ss_flags);
	}

	get_mplock();
	error = kern_sigaltstack(uap->uss ? &ss : NULL,
	    uap->uoss ? &oss : NULL);
	rel_mplock();

	if (error == 0 && uap->uoss) {
		linux_ss.ss_sp = oss.ss_sp;
		linux_ss.ss_size = oss.ss_size;
		linux_ss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
		error = copyout(&linux_ss, uap->uoss, sizeof(l_stack_t));
	}

	return (error);
}
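
/*
 * set_thread_area(2)/get_thread_area(2) manage the three Linux GDT TLS
 * slots (entries 6-8).  They are backed by the per-thread td_tls array:
 * a Linux entry number is translated to an index into that array, and
 * set_user_TLS() reloads the descriptors while inside a critical section.
 */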

int
sys_linux_set_thread_area(struct linux_set_thread_area_args *args)
{
	struct segment_descriptor *desc;
	struct l_user_desc info;
	int error;
	int idx;
	int a[2];
	int i;

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

#ifdef DEBUG
	if (ldebug(set_thread_area))
		kprintf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
		    info.entry_number,
		    info.base_addr,
		    info.limit,
		    info.seg_32bit,
		    info.contents,
		    info.read_exec_only,
		    info.limit_in_pages,
		    info.seg_not_present,
		    info.useable);
#endif

	idx = info.entry_number;
	if (idx != -1 && (idx < 6 || idx > 8))
		return (EINVAL);

	if (idx == -1) {
		/* -1 means finding the first free TLS entry */
		for (i = 0; i < NGTLS; i++) {
			/*
			 * try to determine if the TLS entry is empty by looking
			 * at the lolimit entry.
			 */
			if (curthread->td_tls.tls[i].sd_lolimit == 0) {
				idx = i;
				break;
			}
		}

		if (idx == -1) {
			/*
			 * By now we should have an index.  If not, it means
			 * that no entry is free, so return ESRCH.
			 */
			return (ESRCH);
		}
	} else {
		/* translate the index from Linux to ours */
		idx -= 6;
		KKASSERT(idx >= 0);
	}

	/* Tell the caller about the allocated entry number */
#if 0 /* was SMP */
	info.entry_number = GTLS_START + mycpu->gd_cpuid * NGDT + idx;
#endif
	info.entry_number = GTLS_START + idx;

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (error);

	if (LINUX_LDT_empty(&info)) {
		a[0] = 0;
		a[1] = 0;
	} else {
		a[0] = LINUX_LDT_entry_a(&info);
		a[1] = LINUX_LDT_entry_b(&info);
	}

	/*
	 * Update the TLS and the TLS entries in the GDT, but hold a critical
	 * section as required by set_user_TLS().
	 */
	crit_enter();
	desc = &curthread->td_tls.tls[idx];
	memcpy(desc, &a, sizeof(a));
	set_user_TLS();
	crit_exit();

	return (0);
}

int
sys_linux_get_thread_area(struct linux_get_thread_area_args *args)
{
	struct segment_descriptor *sd;
	struct l_desc_struct desc;
	struct l_user_desc info;
	int error;
	int idx;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		kprintf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	idx = info.entry_number;
	if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
		kprintf("sys_linux_get_thread_area, invalid idx requested: %d\n", idx);
		return (EINVAL);
	}

	memset(&info, 0, sizeof(info));

	/* translate the index from Linux to ours */
	info.entry_number = idx;
	if (idx < GTLS_START) {
		idx -= 6;
	} else {
#if 0 /* was SMP */
		idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#endif
		idx -= GTLS_START;
	}
	KKASSERT(idx >= 0);

	sd = &curthread->td_tls.tls[idx];
	memcpy(&desc, sd, sizeof(desc));
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}