linux emulation - Major update
[dragonfly.git] / sys / emulation / linux / i386 / linux_machdep.c
blob822f8f3217b537112066adb5803be148fc1588ac
1 /*-
2 * Copyright (c) 2000 Marcel Moolenaar
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer
10 * in this position and unchanged.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * $FreeBSD: src/sys/i386/linux/linux_machdep.c,v 1.6.2.4 2001/11/05 19:08:23 marcel Exp $
29 * $DragonFly: src/sys/emulation/linux/i386/linux_machdep.c,v 1.23 2007/07/30 17:41:23 pavalos Exp $
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/imgact.h>
35 #include <sys/kern_syscall.h>
36 #include <sys/lock.h>
37 #include <sys/mman.h>
38 #include <sys/nlookup.h>
39 #include <sys/proc.h>
40 #include <sys/priv.h>
41 #include <sys/resource.h>
42 #include <sys/resourcevar.h>
43 #include <sys/ptrace.h>
44 #include <sys/sysproto.h>
45 #include <sys/thread2.h>
46 #include <sys/unistd.h>
47 #include <sys/wait.h>
49 #include <machine/frame.h>
50 #include <machine/psl.h>
51 #include <machine/segments.h>
52 #include <machine/sysarch.h>
54 #include <vm/vm.h>
55 #include <vm/pmap.h>
56 #include <vm/vm_map.h>
58 #include <sys/mplock2.h>
60 #include "linux.h"
61 #include "linux_proto.h"
62 #include "../linux_ipc.h"
63 #include "../linux_signal.h"
64 #include "../linux_util.h"
65 #include "../linux_emuldata.h"
/*
 * Linux user-space segment descriptor as passed to modify_ldt(2) and the
 * TLS-related calls (clone with CLONE_SETTLS, set/get_thread_area).
 * Field layout mirrors Linux's struct user_desc / modify_ldt_ldt_s.
 */
struct l_descriptor {
	l_uint		entry_number;		/* GDT/LDT slot to operate on */
	l_ulong		base_addr;		/* segment base linear address */
	l_uint		limit;			/* limit, in bytes or pages */
	l_uint		seg_32bit:1;		/* 32-bit default operand size */
	l_uint		contents:2;		/* type bits: data/stack/code */
	l_uint		read_exec_only:1;	/* clear => writable/readable */
	l_uint		limit_in_pages:1;	/* limit granularity is pages */
	l_uint		seg_not_present:1;	/* present bit, inverted */
	l_uint		useable:1;		/* AVL bit for user software */
};
/*
 * Argument block for the old Linux select(2) entry point, which passes a
 * pointer to this structure instead of five separate syscall arguments.
 */
struct l_old_select_argv {
	l_int		nfds;		/* highest fd number plus one */
	l_fd_set	*readfds;
	l_fd_set	*writefds;
	l_fd_set	*exceptfds;
	struct l_timeval *timeout;	/* NULL means block indefinitely */
};
87 int
88 linux_to_bsd_sigaltstack(int lsa)
90 int bsa = 0;
92 if (lsa & LINUX_SS_DISABLE)
93 bsa |= SS_DISABLE;
94 if (lsa & LINUX_SS_ONSTACK)
95 bsa |= SS_ONSTACK;
96 return (bsa);
99 int
100 bsd_to_linux_sigaltstack(int bsa)
102 int lsa = 0;
104 if (bsa & SS_DISABLE)
105 lsa |= LINUX_SS_DISABLE;
106 if (bsa & SS_ONSTACK)
107 lsa |= LINUX_SS_ONSTACK;
108 return (lsa);
/*
 * execve() entry point for Linux binaries.
 *
 * Copies in and translates the Linux path, then runs the native exec
 * path.  If the new image is itself a Linux binary, the per-process
 * emulation data is re-initialized.  On a post-commit exec failure
 * (error < 0) the process image is unusable, so the process is killed.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return(error);
}
/*
 * Old-style (version 0) msgrcv(2) via ipc(2) passes the message pointer
 * and type packed in this user-space structure rather than as separate
 * arguments.
 */
struct l_ipc_kludge {
	struct l_msgbuf *msgp;	/* user buffer to receive the message */
	l_long		msgtyp;	/* requested message type */
};
170 * MPALMOSTSAFE
173 sys_linux_ipc(struct linux_ipc_args *args)
175 int error = 0;
177 get_mplock();
179 switch (args->what & 0xFFFF) {
180 case LINUX_SEMOP: {
181 struct linux_semop_args a;
183 a.semid = args->arg1;
184 a.tsops = args->ptr;
185 a.nsops = args->arg2;
186 a.sysmsg_lresult = 0;
187 error = linux_semop(&a);
188 args->sysmsg_lresult = a.sysmsg_lresult;
189 break;
191 case LINUX_SEMGET: {
192 struct linux_semget_args a;
194 a.key = args->arg1;
195 a.nsems = args->arg2;
196 a.semflg = args->arg3;
197 a.sysmsg_lresult = 0;
198 error = linux_semget(&a);
199 args->sysmsg_lresult = a.sysmsg_lresult;
200 break;
202 case LINUX_SEMCTL: {
203 struct linux_semctl_args a;
204 int error;
206 a.semid = args->arg1;
207 a.semnum = args->arg2;
208 a.cmd = args->arg3;
209 a.sysmsg_lresult = 0;
210 error = copyin((caddr_t)args->ptr, &a.arg, sizeof(a.arg));
211 if (error)
212 break;
213 error = linux_semctl(&a);
214 args->sysmsg_lresult = a.sysmsg_lresult;
215 break;
217 case LINUX_MSGSND: {
218 struct linux_msgsnd_args a;
220 a.msqid = args->arg1;
221 a.msgp = args->ptr;
222 a.msgsz = args->arg2;
223 a.msgflg = args->arg3;
224 a.sysmsg_lresult = 0;
225 error = linux_msgsnd(&a);
226 args->sysmsg_lresult = a.sysmsg_lresult;
227 break;
229 case LINUX_MSGRCV: {
230 struct linux_msgrcv_args a;
232 a.msqid = args->arg1;
233 a.msgsz = args->arg2;
234 if (a.msgsz < 0) {
235 error = EINVAL;
236 break;
238 a.msgflg = args->arg3;
239 a.sysmsg_lresult = 0;
240 if ((args->what >> 16) == 0) {
241 struct l_ipc_kludge tmp;
242 int error;
244 if (args->ptr == NULL) {
245 error = EINVAL;
246 break;
248 error = copyin((caddr_t)args->ptr, &tmp, sizeof(tmp));
249 if (error)
250 break;
251 a.msgp = tmp.msgp;
252 a.msgtyp = tmp.msgtyp;
253 } else {
254 a.msgp = args->ptr;
255 a.msgtyp = args->arg5;
257 error = linux_msgrcv(&a);
258 args->sysmsg_lresult = a.sysmsg_lresult;
259 break;
261 case LINUX_MSGGET: {
262 struct linux_msgget_args a;
264 a.key = args->arg1;
265 a.msgflg = args->arg2;
266 a.sysmsg_lresult = 0;
267 error = linux_msgget(&a);
268 args->sysmsg_lresult = a.sysmsg_lresult;
269 break;
271 case LINUX_MSGCTL: {
272 struct linux_msgctl_args a;
274 a.msqid = args->arg1;
275 a.cmd = args->arg2;
276 a.buf = args->ptr;
277 a.sysmsg_lresult = 0;
278 error = linux_msgctl(&a);
279 args->sysmsg_lresult = a.sysmsg_lresult;
280 break;
282 case LINUX_SHMAT: {
283 struct linux_shmat_args a;
285 a.shmid = args->arg1;
286 a.shmaddr = args->ptr;
287 a.shmflg = args->arg2;
288 a.raddr = (l_ulong *)args->arg3;
289 a.sysmsg_lresult = 0;
290 error = linux_shmat(&a);
291 args->sysmsg_lresult = a.sysmsg_lresult;
292 break;
294 case LINUX_SHMDT: {
295 struct linux_shmdt_args a;
297 a.shmaddr = args->ptr;
298 a.sysmsg_lresult = 0;
299 error = linux_shmdt(&a);
300 args->sysmsg_lresult = a.sysmsg_lresult;
301 break;
303 case LINUX_SHMGET: {
304 struct linux_shmget_args a;
306 a.key = args->arg1;
307 a.size = args->arg2;
308 a.shmflg = args->arg3;
309 a.sysmsg_lresult = 0;
310 error = linux_shmget(&a);
311 args->sysmsg_lresult = a.sysmsg_lresult;
312 break;
314 case LINUX_SHMCTL: {
315 struct linux_shmctl_args a;
317 a.shmid = args->arg1;
318 a.cmd = args->arg2;
319 a.buf = args->ptr;
320 a.sysmsg_lresult = 0;
321 error = linux_shmctl(&a);
322 args->sysmsg_lresult = a.sysmsg_lresult;
323 break;
325 default:
326 error = EINVAL;
327 break;
329 rel_mplock();
330 return(error);
334 * MPSAFE
337 sys_linux_old_select(struct linux_old_select_args *args)
339 struct l_old_select_argv linux_args;
340 struct linux_select_args newsel;
341 int error;
343 #ifdef DEBUG
344 if (ldebug(old_select))
345 kprintf(ARGS(old_select, "%p"), args->ptr);
346 #endif
348 error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
349 if (error)
350 return (error);
352 newsel.sysmsg_iresult = 0;
353 newsel.nfds = linux_args.nfds;
354 newsel.readfds = linux_args.readfds;
355 newsel.writefds = linux_args.writefds;
356 newsel.exceptfds = linux_args.exceptfds;
357 newsel.timeout = linux_args.timeout;
358 error = sys_linux_select(&newsel);
359 args->sysmsg_iresult = newsel.sysmsg_iresult;
360 return(error);
/*
 * fork() entry point for Linux binaries.  Performs a native fork1() and
 * initializes the child's Linux emulation data before letting it run.
 *
 * MPSAFE
 */
int
sys_linux_fork(struct linux_fork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	error = fork1(lp, RFFDG | RFPROC | RFPGLOCK, &p2);
	if (error == 0) {
		/* emuldata must be set up before the child is started */
		emuldata_init(curproc, p2, 0);
		start_forked_proc(lp, p2);
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/*
	 * Are we the child?  The child's return path sets iresult to 1;
	 * Linux expects fork() to return 0 in the child.
	 */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
/*
 * exit_group() entry point: terminate every thread (process) in the
 * emulation thread group, then exit ourselves.
 *
 * If we are the last reference we can simply exit.  Otherwise mark the
 * group as exiting, record the exit status, and SIGKILL every other
 * member that has not already been killed.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_exit_group(struct linux_exit_group_args *args)
{
	struct linux_emuldata *em, *e;
	int sig;

	sig = args->rval;

	get_mplock();
	EMUL_LOCK();

	em = emuldata_get(curproc);

	if (em->s->refs == 1) {
		/* Sole member of the group: just exit. */
		exit1(W_EXITCODE(0, sig));
		/* notreached */
		EMUL_UNLOCK();
		rel_mplock();
		return (0);
	}
	KKASSERT(em->proc == curproc);
	em->flags |= EMUL_DIDKILL;
	em->s->flags |= LINUX_LES_INEXITGROUP;
	em->s->xstat = W_EXITCODE(0, sig);

	/* Put ourselves at the head so the walk below starts after us. */
	LIST_REMOVE(em, threads);
	LIST_INSERT_HEAD(&em->s->threads, em, threads);

	/*
	 * Walk the group by repeatedly moving our own entry past the next
	 * one, killing each member we pass that hasn't been killed yet.
	 */
	while ((e = LIST_NEXT(em, threads)) != NULL) {
		LIST_REMOVE(em, threads);
		LIST_INSERT_AFTER(e, em, threads);
		if ((e->flags & EMUL_DIDKILL) == 0) {
			e->flags |= EMUL_DIDKILL;
			KKASSERT(pfind(e->proc->p_pid) == e->proc);
			ksignal(e->proc, SIGKILL);
		}
	}

	EMUL_UNLOCK();
	exit1(W_EXITCODE(0, sig));
	rel_mplock();
	/* notreached */
	return (0);
}
/*
 * vfork() entry point for Linux binaries.  Like fork but shares the
 * address space (RFMEM) and makes the parent wait (RFPPWAIT) until the
 * child execs or exits.
 *
 * MPSAFE
 */
int
sys_linux_vfork(struct linux_vfork_args *args)
{
	struct lwp *lp = curthread->td_lwp;
	struct proc *p2;
	int error;

	get_mplock();
	error = fork1(lp, RFFDG | RFPROC | RFPPWAIT | RFMEM | RFPGLOCK, &p2);
	if (error == 0) {
		/* emuldata must be set up before the child is started */
		emuldata_init(curproc, p2, 0);
		start_forked_proc(lp, p2);
		args->sysmsg_fds[0] = p2->p_pid;
		args->sysmsg_fds[1] = 0;
	}
	rel_mplock();

	/* Linux expects 0 in the child's return register. */
	if (args->sysmsg_iresult == 1)
		args->sysmsg_iresult = 0;

	return (error);
}
/*
 * clone() entry point for Linux binaries.
 *
 * Maps the Linux CLONE_* flags onto the native RF* fork flags, forks,
 * and then fixes up the child: optional reparenting, emulation data,
 * robust-futex/tid bookkeeping, exit signal, alternate stack pointer,
 * and (with CLONE_SETTLS) the TLS segment descriptor passed via the
 * caller's %esi register.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_clone(struct linux_clone_args *args)
{
	struct segment_descriptor *desc;
	struct l_user_desc info;
	int idx;
	int a[2];
	struct lwp *lp = curthread->td_lwp;
	int error, ff = RFPROC;
	struct proc *p2 = NULL;
	int exit_signal;
	vm_offset_t start;	/* NOTE(review): set to 0 below, never used */

#ifdef DEBUG
	if (ldebug(clone)) {
		kprintf(ARGS(clone, "flags %x, stack %x"),
		    (unsigned int)args->flags, (unsigned int)args->stack);
		if (args->flags & CLONE_PID)
			kprintf(LMSG("CLONE_PID not yet supported"));
	}
#endif
	/* Low byte of the flags is the signal sent to the parent on exit. */
	exit_signal = args->flags & 0x000000ff;
	if (exit_signal >= LINUX_NSIG)
		return (EINVAL);
	if (exit_signal <= LINUX_SIGTBLSZ)
		exit_signal = linux_to_bsd_signal[_SIG_IDX(exit_signal)];

	/* Translate CLONE_* sharing flags into native fork1() flags. */
	if (args->flags & LINUX_CLONE_VM)
		ff |= RFMEM;
	if (args->flags & LINUX_CLONE_SIGHAND)
		ff |= RFSIGSHARE;
	if (!(args->flags & (LINUX_CLONE_FILES | LINUX_CLONE_FS)))
		ff |= RFFDG;
	if ((args->flags & 0xffffff00) == LINUX_THREADING_FLAGS)
		ff |= RFTHREAD;
	if (args->flags & LINUX_CLONE_VFORK)
		ff |= RFPPWAIT;
	if (args->flags & LINUX_CLONE_PARENT_SETTID) {
		if (args->parent_tidptr == NULL)
			return (EINVAL);
	}

	error = 0;
	start = 0;

	get_mplock();
	error = fork1(lp, ff | RFPGLOCK, &p2);
	if (error) {
		rel_mplock();
		return error;
	}

	args->sysmsg_fds[0] = p2 ? p2->p_pid : 0;
	args->sysmsg_fds[1] = 0;

	if (args->flags & (LINUX_CLONE_PARENT | LINUX_CLONE_THREAD))
		proc_reparent(p2, curproc->p_pptr /* XXX */);

	emuldata_init(curproc, p2, args->flags);
	linux_proc_fork(p2, curproc, args->child_tidptr);
	/*
	 * XXX: this can't happen, p2 is never NULL, or else we'd have
	 * other problems, too (see p2->p_sigparent == ...,
	 * linux_proc_fork and emuldata_init.
	 */
	if (p2 == NULL) {
		error = ESRCH;
	} else {
		if (args->flags & LINUX_CLONE_PARENT_SETTID) {
			error = copyout(&p2->p_pid, args->parent_tidptr, sizeof(p2->p_pid));
		}
	}

	p2->p_sigparent = exit_signal;
	if (args->stack) {
		/* Child starts on the caller-supplied stack. */
		ONLY_LWP_IN_PROC(p2)->lwp_md.md_regs->tf_esp =
		    (unsigned long)args->stack;
	}

	if (args->flags & LINUX_CLONE_SETTLS) {
		/* The user_desc pointer is passed in the caller's %esi. */
		error = copyin((void *)curthread->td_lwp->lwp_md.md_regs->tf_esi, &info, sizeof(struct l_user_desc));
		if (error) {
			kprintf("copyin of tf_esi to info failed\n");
		} else {
			idx = info.entry_number;
			/*
			 * We understand both our own entries such as the ones
			 * we provide on linux_set_thread_area, as well as the
			 * linux-type entries 6-8.
			 */
			if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
				kprintf("LINUX_CLONE_SETTLS, invalid idx requested: %d\n", idx);
				goto out;
			}
			/* Rebase the entry number to a td_tls.tls[] index. */
			if (idx < GTLS_START) {
				idx -= 6;
			} else {
#ifdef SMP
				idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#else
				idx -= GTLS_START;
#endif
			}
			KKASSERT(idx >= 0);

			a[0] = LINUX_LDT_entry_a(&info);
			a[1] = LINUX_LDT_entry_b(&info);
			if (p2) {
				desc = &FIRST_LWP_IN_PROC(p2)->lwp_thread->td_tls.tls[idx];
				memcpy(desc, &a, sizeof(a));
			} else {
				kprintf("linux_clone... we don't have a p2\n");
			}
		}
	}
out:
	/* All fixups done; let the child run. */
	if (p2)
		start_forked_proc(lp, p2);

	rel_mplock();
#ifdef DEBUG
	if (ldebug(clone))
		kprintf(LMSG("clone: successful rfork to %ld"),
		    (long)p2->p_pid);
#endif

	return (error);
}
/* XXX move */
/*
 * Argument block for the old Linux mmap(2) entry point, which passes a
 * pointer to this structure instead of six separate syscall arguments.
 */
struct l_mmap_argv {
	l_caddr_t	addr;	/* requested mapping address */
	l_int		len;
	l_int		prot;
	l_int		flags;
	l_int		fd;
	l_int		pos;	/* byte offset (old mmap; mmap2 uses pages) */
};

/* Implicit Linux per-thread stack size and guard used for MAP_GROWSDOWN */
#define STACK_SIZE  (2 * 1024 * 1024)
#define GUARD_SIZE  (4 * PAGE_SIZE)
/*
 * Common backend for the Linux mmap(2) and mmap2(2) entry points.
 * Translates Linux mapping flags to native ones, emulates the Linux
 * MAP_GROWSDOWN auto-growing thread-stack semantics, and calls
 * kern_mmap().  On success the resulting address is stored in *res.
 *
 * MPALMOSTSAFE
 */
static int
linux_mmap_common(caddr_t linux_addr, size_t linux_len, int linux_prot,
		  int linux_flags, int linux_fd, off_t pos, void **res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	caddr_t addr;
	void *new;
	int error, flags, len, prot, fd;

	flags = 0;
	if (linux_flags & LINUX_MAP_SHARED)
		flags |= MAP_SHARED;
	if (linux_flags & LINUX_MAP_PRIVATE)
		flags |= MAP_PRIVATE;
	if (linux_flags & LINUX_MAP_FIXED)
		flags |= MAP_FIXED;
	if (linux_flags & LINUX_MAP_ANON) {
		flags |= MAP_ANON;
	} else {
		flags |= MAP_NOSYNC;
	}

	get_mplock();

	if (linux_flags & LINUX_MAP_GROWSDOWN) {
		flags |= MAP_STACK;
		/* The linux MAP_GROWSDOWN option does not limit auto
		 * growth of the region. Linux mmap with this option
		 * takes as addr the inital BOS, and as len, the initial
		 * region size. It can then grow down from addr without
		 * limit. However, linux threads has an implicit internal
		 * limit to stack size of STACK_SIZE. Its just not
		 * enforced explicitly in linux. But, here we impose
		 * a limit of (STACK_SIZE - GUARD_SIZE) on the stack
		 * region, since we can do this with our mmap.
		 *
		 * Our mmap with MAP_STACK takes addr as the maximum
		 * downsize limit on BOS, and as len the max size of
		 * the region. It them maps the top SGROWSIZ bytes,
		 * and autgrows the region down, up to the limit
		 * in addr.
		 *
		 * If we don't use the MAP_STACK option, the effect
		 * of this code is to allocate a stack region of a
		 * fixed size of (STACK_SIZE - GUARD_SIZE).
		 */

		/* This gives us TOS */
		addr = linux_addr + linux_len;

		if (addr > p->p_vmspace->vm_maxsaddr) {
			/* Some linux apps will attempt to mmap
			 * thread stacks near the top of their
			 * address space. If their TOS is greater
			 * than vm_maxsaddr, vm_map_growstack()
			 * will confuse the thread stack with the
			 * process stack and deliver a SEGV if they
			 * attempt to grow the thread stack past their
			 * current stacksize rlimit. To avoid this,
			 * adjust vm_maxsaddr upwards to reflect
			 * the current stacksize rlimit rather
			 * than the maximum possible stacksize.
			 * It would be better to adjust the
			 * mmap'ed region, but some apps do not check
			 * mmap's return value.
			 */
			p->p_vmspace->vm_maxsaddr = (char *)USRSTACK -
			    p->p_rlimit[RLIMIT_STACK].rlim_cur;
		}

		/* This gives us our maximum stack size */
		if (linux_len > STACK_SIZE - GUARD_SIZE) {
			len = linux_len;
		} else {
			len = STACK_SIZE - GUARD_SIZE;
		}
		/* This gives us a new BOS. If we're using VM_STACK, then
		 * mmap will just map the top SGROWSIZ bytes, and let
		 * the stack grow down to the limit at BOS. If we're
		 * not using VM_STACK we map the full stack, since we
		 * don't have a way to autogrow it.
		 */
		addr -= len;
	} else {
		addr = linux_addr;
		len = linux_len;
	}

	/* Linux mappings are always at least readable. */
	prot = linux_prot | PROT_READ;
	if (linux_flags & LINUX_MAP_ANON) {
		fd = -1;
	} else {
		fd = linux_fd;
	}

#ifdef DEBUG
	if (ldebug(mmap) || ldebug(mmap2))
		kprintf("-> (%p, %d, %d, 0x%08x, %d, %lld)\n",
		    addr, len, prot, flags, fd, pos);
#endif
	error = kern_mmap(curproc->p_vmspace, addr, len,
			  prot, flags, fd, pos, &new);
	rel_mplock();

	if (error == 0)
		*res = new;
	return (error);
}
732 * MPSAFE
735 sys_linux_mmap(struct linux_mmap_args *args)
737 struct l_mmap_argv linux_args;
738 int error;
740 error = copyin((caddr_t)args->ptr, &linux_args, sizeof(linux_args));
741 if (error)
742 return (error);
744 #ifdef DEBUG
745 if (ldebug(mmap))
746 kprintf(ARGS(mmap, "%p, %d, %d, 0x%08x, %d, %d"),
747 (void *)linux_args.addr, linux_args.len, linux_args.prot,
748 linux_args.flags, linux_args.fd, linux_args.pos);
749 #endif
750 error = linux_mmap_common(linux_args.addr, linux_args.len,
751 linux_args.prot, linux_args.flags, linux_args.fd,
752 linux_args.pos, &args->sysmsg_resultp);
753 #ifdef DEBUG
754 if (ldebug(mmap))
755 kprintf("-> %p\n", args->sysmsg_resultp);
756 #endif
757 return(error);
761 * MPSAFE
764 sys_linux_mmap2(struct linux_mmap2_args *args)
766 int error;
768 #ifdef DEBUG
769 if (ldebug(mmap2))
770 kprintf(ARGS(mmap2, "%p, %d, %d, 0x%08x, %d, %d"),
771 (void *)args->addr, args->len, args->prot, args->flags,
772 args->fd, args->pgoff);
773 #endif
774 error = linux_mmap_common((void *)args->addr, args->len, args->prot,
775 args->flags, args->fd, args->pgoff * PAGE_SIZE,
776 &args->sysmsg_resultp);
777 #ifdef DEBUG
778 if (ldebug(mmap2))
779 kprintf("-> %p\n", args->sysmsg_resultp);
780 #endif
781 return (error);
785 * MPSAFE
788 sys_linux_pipe(struct linux_pipe_args *args)
790 int error;
791 int reg_edx;
792 struct pipe_args bsd_args;
794 #ifdef DEBUG
795 if (ldebug(pipe))
796 kprintf(ARGS(pipe, "*"));
797 #endif
799 reg_edx = args->sysmsg_fds[1];
800 error = sys_pipe(&bsd_args);
801 if (error) {
802 args->sysmsg_fds[1] = reg_edx;
803 return (error);
806 error = copyout(bsd_args.sysmsg_fds, args->pipefds, 2*sizeof(int));
807 if (error) {
808 args->sysmsg_fds[1] = reg_edx;
809 return (error);
812 args->sysmsg_fds[1] = reg_edx;
813 args->sysmsg_fds[0] = 0;
814 return (0);
818 * MPSAFE
821 sys_linux_ioperm(struct linux_ioperm_args *args)
823 struct sysarch_args sa;
824 struct i386_ioperm_args *iia;
825 caddr_t sg;
826 int error;
828 sg = stackgap_init();
829 iia = stackgap_alloc(&sg, sizeof(struct i386_ioperm_args));
830 iia->start = args->start;
831 iia->length = args->length;
832 iia->enable = args->enable;
833 sa.sysmsg_resultp = NULL;
834 sa.op = I386_SET_IOPERM;
835 sa.parms = (char *)iia;
836 error = sys_sysarch(&sa);
837 args->sysmsg_resultp = sa.sysmsg_resultp;
838 return(error);
842 * MPSAFE
845 sys_linux_iopl(struct linux_iopl_args *args)
847 struct thread *td = curthread;
848 struct lwp *lp = td->td_lwp;
849 int error;
851 if (args->level < 0 || args->level > 3)
852 return (EINVAL);
853 if ((error = priv_check(td, PRIV_ROOT)) != 0)
854 return (error);
855 if (securelevel > 0)
856 return (EPERM);
857 lp->lwp_md.md_regs->tf_eflags =
858 (lp->lwp_md.md_regs->tf_eflags & ~PSL_IOPL) |
859 (args->level * (PSL_IOPL / 3));
860 return (0);
/*
 * modify_ldt(2) entry point for Linux binaries.
 *
 * func 0x00 reads the LDT, 0x02 reads the (all-zero) default LDT, and
 * 0x01/0x11 write a single descriptor.  Reads and writes are forwarded
 * to the native sysarch(I386_GET_LDT/I386_SET_LDT) with arguments built
 * in the stackgap; for writes the Linux descriptor fields are repacked
 * into a native segment descriptor.
 *
 * MPSAFE
 */
int
sys_linux_modify_ldt(struct linux_modify_ldt_args *uap)
{
	int error;
	caddr_t sg;
	struct sysarch_args args;
	struct i386_ldt_args *ldt;
	struct l_descriptor ld;
	union descriptor *desc;
	int size, written;

	sg = stackgap_init();

	if (uap->ptr == NULL)
		return (EINVAL);

	switch (uap->func) {
	case 0x00: /* read_ldt */
		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		ldt->start = 0;
		ldt->descs = uap->ptr;
		ldt->num = uap->bytecount / sizeof(union descriptor);
		args.op = I386_GET_LDT;
		args.parms = (char*)ldt;
		args.sysmsg_iresult = 0;
		error = sys_sysarch(&args);
		/* sysarch returns a descriptor count; Linux wants bytes. */
		uap->sysmsg_iresult = args.sysmsg_iresult *
				      sizeof(union descriptor);
		break;
	case 0x02: /* read_default_ldt = 0 */
		/* The default LDT is empty: return up to 5 zeroed entries. */
		size = 5*sizeof(struct l_desc_struct);
		if (size > uap->bytecount)
			size = uap->bytecount;
		for (written = error = 0; written < size && error == 0; written++)
			error = subyte((char *)uap->ptr + written, 0);
		uap->sysmsg_iresult = written;
		break;
	case 0x01: /* write_ldt */
	case 0x11: /* write_ldt */
		if (uap->bytecount != sizeof(ld))
			return (EINVAL);

		error = copyin(uap->ptr, &ld, sizeof(ld));
		if (error)
			return (error);

		ldt = stackgap_alloc(&sg, sizeof(*ldt));
		desc = stackgap_alloc(&sg, sizeof(*desc));
		ldt->start = ld.entry_number;
		ldt->descs = desc;
		ldt->num = 1;
		/* Repack the Linux user_desc fields into a native sd. */
		desc->sd.sd_lolimit = (ld.limit & 0x0000ffff);
		desc->sd.sd_hilimit = (ld.limit & 0x000f0000) >> 16;
		desc->sd.sd_lobase = (ld.base_addr & 0x00ffffff);
		desc->sd.sd_hibase = (ld.base_addr & 0xff000000) >> 24;
		desc->sd.sd_type = SDT_MEMRO | ((ld.read_exec_only ^ 1) << 1) |
		    (ld.contents << 2);
		desc->sd.sd_dpl = 3;	/* always a user segment */
		desc->sd.sd_p = (ld.seg_not_present ^ 1);
		desc->sd.sd_xx = 0;
		desc->sd.sd_def32 = ld.seg_32bit;
		desc->sd.sd_gran = ld.limit_in_pages;
		args.op = I386_SET_LDT;
		args.parms = (char*)ldt;
		args.sysmsg_iresult = 0;
		error = sys_sysarch(&args);
		uap->sysmsg_iresult = args.sysmsg_iresult;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * Old-style sigaction(2) entry point for Linux binaries.  Converts the
 * single-word old Linux sigaction (l_osigaction_t) to the full Linux
 * form, then to the native form, calls kern_sigaction(), and converts
 * any previous action back for the caller.
 *
 * MPALMOSTSAFE
 */
int
sys_linux_sigaction(struct linux_sigaction_args *args)
{
	l_osigaction_t osa;
	l_sigaction_t linux_act, linux_oact;
	struct sigaction act, oact;
	int error;

#ifdef DEBUG
	if (ldebug(sigaction))
		kprintf(ARGS(sigaction, "%d, %p, %p"),
		    args->sig, (void *)args->nsa, (void *)args->osa);
#endif

	if (args->nsa) {
		error = copyin(args->nsa, &osa, sizeof(l_osigaction_t));
		if (error)
			return (error);
		/* Widen the old one-word mask into a full l_sigset_t. */
		linux_act.lsa_handler = osa.lsa_handler;
		linux_act.lsa_flags = osa.lsa_flags;
		linux_act.lsa_restorer = osa.lsa_restorer;
		LINUX_SIGEMPTYSET(linux_act.lsa_mask);
		linux_act.lsa_mask.__bits[0] = osa.lsa_mask;
		linux_to_bsd_sigaction(&linux_act, &act);
	}

	get_mplock();
	error = kern_sigaction(args->sig, args->nsa ? &act : NULL,
			       args->osa ? &oact : NULL);
	rel_mplock();

	if (args->osa != NULL && !error) {
		/* Narrow the previous action back to the old format. */
		bsd_to_linux_sigaction(&oact, &linux_oact);
		osa.lsa_handler = linux_oact.lsa_handler;
		osa.lsa_flags = linux_oact.lsa_flags;
		osa.lsa_restorer = linux_oact.lsa_restorer;
		osa.lsa_mask = linux_oact.lsa_mask.__bits[0];
		error = copyout(&osa, args->osa, sizeof(l_osigaction_t));
	}
	return (error);
}
988 * Linux has two extra args, restart and oldmask. We dont use these,
989 * but it seems that "restart" is actually a context pointer that
990 * enables the signal to happen with a different register set.
992 * MPALMOSTSAFE
995 sys_linux_sigsuspend(struct linux_sigsuspend_args *args)
997 l_sigset_t linux_mask;
998 sigset_t mask;
999 int error;
1001 #ifdef DEBUG
1002 if (ldebug(sigsuspend))
1003 kprintf(ARGS(sigsuspend, "%08lx"), (unsigned long)args->mask);
1004 #endif
1006 LINUX_SIGEMPTYSET(mask);
1007 mask.__bits[0] = args->mask;
1008 linux_to_bsd_sigset(&linux_mask, &mask);
1010 get_mplock();
1011 error = kern_sigsuspend(&mask);
1012 rel_mplock();
1014 return(error);
1018 * MPALMOSTSAFE
1021 sys_linux_rt_sigsuspend(struct linux_rt_sigsuspend_args *uap)
1023 l_sigset_t linux_mask;
1024 sigset_t mask;
1025 int error;
1027 #ifdef DEBUG
1028 if (ldebug(rt_sigsuspend))
1029 kprintf(ARGS(rt_sigsuspend, "%p, %d"),
1030 (void *)uap->newset, uap->sigsetsize);
1031 #endif
1033 if (uap->sigsetsize != sizeof(l_sigset_t))
1034 return (EINVAL);
1036 error = copyin(uap->newset, &linux_mask, sizeof(l_sigset_t));
1037 if (error)
1038 return (error);
1040 linux_to_bsd_sigset(&linux_mask, &mask);
1042 get_mplock();
1043 error = kern_sigsuspend(&mask);
1044 rel_mplock();
1046 return(error);
1050 * MPALMOSTSAFE
1053 sys_linux_pause(struct linux_pause_args *args)
1055 struct thread *td = curthread;
1056 struct lwp *lp = td->td_lwp;
1057 sigset_t mask;
1058 int error;
1060 #ifdef DEBUG
1061 if (ldebug(pause))
1062 kprintf(ARGS(pause, ""));
1063 #endif
1065 mask = lp->lwp_sigmask;
1067 get_mplock();
1068 error = kern_sigsuspend(&mask);
1069 rel_mplock();
1071 return(error);
1075 * MPALMOSTSAFE
1078 sys_linux_sigaltstack(struct linux_sigaltstack_args *uap)
1080 stack_t ss, oss;
1081 l_stack_t linux_ss;
1082 int error;
1084 #ifdef DEBUG
1085 if (ldebug(sigaltstack))
1086 kprintf(ARGS(sigaltstack, "%p, %p"), uap->uss, uap->uoss);
1087 #endif
1089 if (uap->uss) {
1090 error = copyin(uap->uss, &linux_ss, sizeof(l_stack_t));
1091 if (error)
1092 return (error);
1094 ss.ss_sp = linux_ss.ss_sp;
1095 ss.ss_size = linux_ss.ss_size;
1096 ss.ss_flags = linux_to_bsd_sigaltstack(linux_ss.ss_flags);
1099 get_mplock();
1100 error = kern_sigaltstack(uap->uss ? &ss : NULL,
1101 uap->uoss ? &oss : NULL);
1102 rel_mplock();
1104 if (error == 0 && uap->uoss) {
1105 linux_ss.ss_sp = oss.ss_sp;
1106 linux_ss.ss_size = oss.ss_size;
1107 linux_ss.ss_flags = bsd_to_linux_sigaltstack(oss.ss_flags);
1108 error = copyout(&linux_ss, uap->uoss, sizeof(l_stack_t));
1111 return (error);
1115 sys_linux_set_thread_area(struct linux_set_thread_area_args *args)
1117 struct segment_descriptor *desc;
1118 struct l_user_desc info;
1119 int error;
1120 int idx;
1121 int a[2];
1122 int i;
1124 error = copyin(args->desc, &info, sizeof(struct l_user_desc));
1125 if (error)
1126 return (EFAULT);
1128 #ifdef DEBUG
1129 if (ldebug(set_thread_area))
1130 kprintf(ARGS(set_thread_area, "%i, %x, %x, %i, %i, %i, %i, %i, %i\n"),
1131 info.entry_number,
1132 info.base_addr,
1133 info.limit,
1134 info.seg_32bit,
1135 info.contents,
1136 info.read_exec_only,
1137 info.limit_in_pages,
1138 info.seg_not_present,
1139 info.useable);
1140 #endif
1142 idx = info.entry_number;
1143 if (idx != -1 && (idx < 6 || idx > 8))
1144 return (EINVAL);
1146 if (idx == -1) {
1147 /* -1 means finding the first free TLS entry */
1148 for (i = 0; i < NGTLS; i++) {
1150 * try to determine if the TLS entry is empty by looking
1151 * at the lolimit entry.
1153 if (curthread->td_tls.tls[idx].sd_lolimit == 0) {
1154 idx = i;
1155 break;
1159 if (idx == -1) {
1161 * By now we should have an index. If not, it means
1162 * that no entry is free, so return ESRCH.
1164 return (ESRCH);
1166 } else {
1167 /* translate the index from Linux to ours */
1168 idx -= 6;
1169 KKASSERT(idx >= 0);
1172 /* Tell the caller about the allocated entry number */
1173 #if 0
1174 info.entry_number = idx;
1175 #endif
1176 #ifdef SMP
1177 info.entry_number = GTLS_START + mycpu->gd_cpuid * NGDT + idx;
1178 #else
1179 info.entry_number = GTLS_START + idx;
1180 #endif
1182 error = copyout(&info, args->desc, sizeof(struct l_user_desc));
1183 if (error)
1184 return (error);
1186 if (LINUX_LDT_empty(&info)) {
1187 a[0] = 0;
1188 a[1] = 0;
1189 } else {
1190 a[0] = LINUX_LDT_entry_a(&info);
1191 a[1] = LINUX_LDT_entry_b(&info);
1195 * Update the TLS and the TLS entries in the GDT, but hold a critical
1196 * section as required by set_user_TLS().
1198 crit_enter();
1199 desc = &curthread->td_tls.tls[idx];
1200 memcpy(desc, &a, sizeof(a));
1201 set_user_TLS();
1202 crit_exit();
1204 return (0);
/*
 * get_thread_area(2) entry point for Linux binaries.
 *
 * Reads back a TLS segment descriptor.  Accepts either the Linux TLS
 * GDT entry numbers 6-8 or the entry numbers we hand out from
 * set_thread_area (GTLS_START based); the index is rebased onto the
 * thread's td_tls.tls[] array and the descriptor is unpacked into a
 * Linux user_desc for the caller.
 */
int
sys_linux_get_thread_area(struct linux_get_thread_area_args *args)
{
	struct segment_descriptor *sd;
	struct l_desc_struct desc;
	struct l_user_desc info;
	int error;
	int idx;

#ifdef DEBUG
	if (ldebug(get_thread_area))
		kprintf(ARGS(get_thread_area, "%p"), args->desc);
#endif

	error = copyin(args->desc, &info, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	idx = info.entry_number;
	if ((idx < 6 || idx > 8) && (idx < GTLS_START)) {
		kprintf("sys_linux_get_thread_area, invalid idx requested: %d\n", idx);
		return (EINVAL);
	}

	memset(&info, 0, sizeof(info));

	/* translate the index from Linux to ours */
	info.entry_number = idx;	/* echo the original entry number back */
	if (idx < GTLS_START) {
		idx -= 6;
	} else {
#ifdef SMP
		idx -= (GTLS_START + mycpu->gd_cpuid * NGDT);
#else
		idx -= GTLS_START;
#endif
	}
	KKASSERT(idx >= 0);

	/* Unpack the native descriptor into Linux user_desc fields. */
	sd = &curthread->td_tls.tls[idx];
	memcpy(&desc, sd, sizeof(desc));
	info.base_addr = LINUX_GET_BASE(&desc);
	info.limit = LINUX_GET_LIMIT(&desc);
	info.seg_32bit = LINUX_GET_32BIT(&desc);
	info.contents = LINUX_GET_CONTENTS(&desc);
	info.read_exec_only = !LINUX_GET_WRITABLE(&desc);
	info.limit_in_pages = LINUX_GET_LIMIT_PAGES(&desc);
	info.seg_not_present = !LINUX_GET_PRESENT(&desc);
	info.useable = LINUX_GET_USEABLE(&desc);

	error = copyout(&info, args->desc, sizeof(struct l_user_desc));
	if (error)
		return (EFAULT);

	return (0);
}