/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/platform/pc32/i386/vm86.c,v 1.26 2008/08/02 01:14:43 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>    /* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/clock.h>
#include <bus/isa/isa.h>
#include <bus/isa/rtc.h>
#include <machine_base/isa/timerreg.h>

extern int i386_extend_pcb(struct lwp *);
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

#define PGTABLE_SIZE    ((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE     32
#define IOMAP_SIZE      ctob(IOPAGES)
#define TSS_SIZE \
        (sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
         INTMAP_SIZE + IOMAP_SIZE + 1)

struct vm86_layout {
        pt_entry_t      vml_pgtbl[PGTABLE_SIZE];
        struct pcb      vml_pcb;
        struct pcb_ext  vml_ext;
        char            vml_intmap[INTMAP_SIZE];
        char            vml_iomap[IOMAP_SIZE];
        char            vml_iomap_trailer;
};

void vm86_prepcall(struct vm86frame *);

struct system_map {
        int             type;
        vm_offset_t     start;
        vm_offset_t     end;
};
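
/*
 * One-byte opcodes (and prefixes) of the instructions emulated below.
 * PUSH_MASK strips the eflags bits hidden from a vm86 PUSHF; POP_MASK
 * strips the bits a vm86 POPF/IRET is not allowed to change.
 */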

#define HLT     0xf4
#define CLI     0xfa
#define STI     0xfb
#define PUSHF   0x9c
#define POPF    0x9d
#define INTn    0xcd
#define IRET    0xcf
#define INB     0xe4
#define INW     0xe5
#define INBDX   0xec
#define INWDX   0xed
#define OUTB    0xe6
#define OUTW    0xe7
#define OUTBDX  0xee
#define OUTWDX  0xef
#define CALLm   0xff
#define OPERAND_SIZE_PREFIX     0x66
#define ADDRESS_SIZE_PREFIX     0x67
#define PUSH_MASK       ~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK        ~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

static void vm86_setup_timer_fault(void);
static void vm86_clear_timer_fault(void);

static int vm86_blew_up_timer;

static int timer_warn = 1;
SYSCTL_INT(_debug, OID_AUTO, timer_warn, CTLFLAG_RW, &timer_warn, 0, "");
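
/*
 * Real-mode address arithmetic: a linear address is (sel << 4) + off,
 * and an IVT entry packs cs:ip as (sel << 16) | ip.  The PUSH()/POP()
 * helpers move 16- and 32-bit values across the vm86 stack with the
 * fuword()/suword() family of accessors, since the stack lives in
 * user-mapped low memory.
 */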

static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
        return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
        *sel = vec >> 16;
        *off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
        return ((sel << 16) | off);
}

static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 2;
        susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
        vmf->vmf_sp -= 4;
        suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
        u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 2;
        return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
        u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

        vmf->vmf_sp += 4;
        return (x);
}
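
/*
 * Emulate instructions that fault while the CPU is in vm86 mode.
 * Called from the trap handler; returns 0 to resume the vm86 context
 * with the emulation applied, or a signal number to deliver to the
 * process.
 */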

/*
 * MPSAFE
 */
int
vm86_emulate(struct vm86frame *vmf)
{
        struct vm86_kernel *vm86;
        caddr_t addr;
        u_char i_byte;
        u_int temp_flags;
        int inc_ip = 1;
        int retcode = 0;

        /*
         * pcb_ext contains the address of the extension area, or zero if
         * the extension is not present.  (This check should not be needed,
         * as we can't enter vm86 mode until we set up an extension area)
         */
        if (curthread->td_pcb->pcb_ext == 0)
                return (SIGBUS);
        vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vmf->vmf_eflags & PSL_T)
                retcode = SIGTRAP;

        /*
         * Instruction emulation
         */
        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        i_byte = fubyte(addr);
        if (i_byte == ADDRESS_SIZE_PREFIX) {
                i_byte = fubyte(++addr);
                inc_ip++;
        }

        /*
         * I/O emulation (TIMER only, a big hack).  Just reenable the
         * I/O bits involved, flag it, and retry the instruction.
         */
        switch (i_byte) {
        case OUTB:
        case OUTW:
        case OUTBDX:
        case OUTWDX:
                vm86_blew_up_timer = 1;
                /* fall through */
        case INB:
        case INW:
        case INBDX:
        case INWDX:
                vm86_clear_timer_fault();
                /* retry insn */
                return (0);
        }

        if (vm86->vm86_has_vme) {
                switch (i_byte) {
                case OPERAND_SIZE_PREFIX:
                        i_byte = fubyte(++addr);
                        inc_ip++;
                        switch (i_byte) {
                        case PUSHF:
                                if (vmf->vmf_eflags & PSL_VIF)
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL | PSL_I, vmf);
                                else
                                        PUSHL((vmf->vmf_eflags & PUSH_MASK)
                                            | PSL_IOPL, vmf);
                                vmf->vmf_ip += inc_ip;
                                return (0);

                        case POPF:
                                temp_flags = POPL(vmf) & POP_MASK;
                                vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                                    | temp_flags | PSL_VM | PSL_I;
                                vmf->vmf_ip += inc_ip;
                                if (temp_flags & PSL_I) {
                                        vmf->vmf_eflags |= PSL_VIF;
                                        if (vmf->vmf_eflags & PSL_VIP)
                                                break;
                                } else {
                                        vmf->vmf_eflags &= ~PSL_VIF;
                                }
                                return (0);
                        }
                        break;

                /* VME faults here if VIP is set, but does not set VIF. */
                case STI:
                        vmf->vmf_eflags |= PSL_VIF;
                        vmf->vmf_ip += inc_ip;
                        if ((vmf->vmf_eflags & PSL_VIP) == 0) {
                                uprintf("fatal sti\n");
                                return (SIGKILL);
                        }
                        break;

                /* VME if no redirection support */
                case INTn:
                        break;

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case POPF:
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);

                /* VME if trying to set PSL_TF, or PSL_I when VIP is set */
                case IRET:
                        vmf->vmf_ip = POP(vmf);
                        vmf->vmf_cs = POP(vmf);
                        temp_flags = POP(vmf) & POP_MASK;
                        vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        if (temp_flags & PSL_I) {
                                vmf->vmf_eflags |= PSL_VIF;
                                if (vmf->vmf_eflags & PSL_VIP)
                                        break;
                        } else {
                                vmf->vmf_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);
        }

        switch (i_byte) {
        case OPERAND_SIZE_PREFIX:
                i_byte = fubyte(++addr);
                inc_ip++;
                switch (i_byte) {
                case PUSHF:
                        if (vm86->vm86_eflags & PSL_VIF)
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL | PSL_I, vmf);
                        else
                                PUSHL((vmf->vmf_flags & PUSH_MASK)
                                    | PSL_IOPL, vmf);
                        vmf->vmf_ip += inc_ip;
                        return (retcode);

                case POPF:
                        temp_flags = POPL(vmf) & POP_MASK;
                        vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
                            | temp_flags | PSL_VM | PSL_I;
                        vmf->vmf_ip += inc_ip;
                        if (temp_flags & PSL_I) {
                                vm86->vm86_eflags |= PSL_VIF;
                                if (vm86->vm86_eflags & PSL_VIP)
                                        break;
                        } else {
                                vm86->vm86_eflags &= ~PSL_VIF;
                        }
                        return (retcode);
                }
                return (SIGBUS);

        case CLI:
                vm86->vm86_eflags &= ~PSL_VIF;
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case STI:
                /* if there is a pending interrupt, go to the emulator */
                vm86->vm86_eflags |= PSL_VIF;
                vmf->vmf_ip += inc_ip;
                if (vm86->vm86_eflags & PSL_VIP)
                        break;
                return (retcode);

        case PUSHF:
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                vmf->vmf_ip += inc_ip;
                return (retcode);

        case INTn:
                i_byte = fubyte(addr + 1);
                if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
                        break;
                if (vm86->vm86_eflags & PSL_VIF)
                        PUSH((vmf->vmf_flags & PUSH_MASK)
                            | PSL_IOPL | PSL_I, vmf);
                else
                        PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
                PUSH(vmf->vmf_cs, vmf);
                PUSH(vmf->vmf_ip + inc_ip + 1, vmf);    /* increment IP */
                GET_VEC(fuword((caddr_t)(i_byte * 4)),
                    &vmf->vmf_cs, &vmf->vmf_ip);
                vmf->vmf_flags &= ~PSL_T;
                vm86->vm86_eflags &= ~PSL_VIF;
                return (retcode);

        case IRET:
                vmf->vmf_ip = POP(vmf);
                vmf->vmf_cs = POP(vmf);
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);

        case POPF:
                temp_flags = POP(vmf) & POP_MASK;
                vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
                    | temp_flags | PSL_VM | PSL_I;
                vmf->vmf_ip += inc_ip;
                if (temp_flags & PSL_I) {
                        vm86->vm86_eflags |= PSL_VIF;
                        if (vm86->vm86_eflags & PSL_VIP)
                                break;
                } else {
                        vm86->vm86_eflags &= ~PSL_VIF;
                }
                return (retcode);
        }
        return (SIGBUS);
}
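
/*
 * Build the static vm86 area: page table, scratch PCB, PCB extension
 * (with TSS), interrupt map, and I/O bitmap, all packed into the pages
 * reserved for this purpose in locore.s.
 */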

void
vm86_initialize(void)
{
        int i;
        u_int *addr;
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
        struct pcb *pcb;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                0,                      /* length (overwritten) */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 16 size */
                0                       /* granularity */
        };

        /*
         * This should be a compile-time error, but cpp doesn't grok sizeof().
         */
        if (sizeof(struct vm86_layout) > ctob(3))
                panic("struct vm86_layout exceeds space allocated in locore.s");

        /*
         * Below is the memory layout that we use for the vm86 region.
         *
         * +--------+
         * |        |
         * |        |
         * | page 0 |
         * |        |   +--------+
         * |        |   | stack  |
         * +--------+   +--------+ <--------- vm86paddr
         * |        |   |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
         * |        |   +--------+
         * |        |   |  PCB   | size: ~240 bytes
         * | page 1 |   |PCB Ext | size: ~140 bytes (includes TSS)
         * |        |   +--------+
         * |        |   |int map |
         * |        |   +--------+
         * +--------+   |        |
         * | page 2 |   |  I/O   |
         * +--------+   | bitmap |
         * | page 3 |   |        |
         * |        |   +--------+
         * +--------+
         */

        /*
         * A rudimentary PCB must be installed, in order to get to the
         * PCB extension area.  We use the PCB area as a scratchpad for
         * data storage, the layout of which is shown below.
         *
         * pcb_esi      = new PTD entry 0
         * pcb_ebp      = pointer to frame on vm86 stack
         * pcb_esp      = stack frame pointer at time of switch
         * pcb_ebx      = va of vm86 page table
         * pcb_eip      = argument pointer to initial call
         * pcb_spare[0] = saved TSS descriptor, word 0
         * pcb_spare[1] = saved TSS descriptor, word 1
         */
#define new_ptd         pcb_esi
#define vm86_frame      pcb_ebp
#define pgtable_va      pcb_ebx

        pcb = &vml->vml_pcb;
        ext = &vml->vml_ext;

        bzero(pcb, sizeof(struct pcb));
        pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
        pcb->vm86_frame = (pt_entry_t)vm86paddr - sizeof(struct vm86frame);
        pcb->pgtable_va = (vm_offset_t)vm86paddr;
        pcb->pcb_ext = ext;

        bzero(ext, sizeof(struct pcb_ext));
        ext->ext_tss.tss_esp0 = (vm_offset_t)vm86paddr;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        ext->ext_tss.tss_ioopt =
            ((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
        ext->ext_iomap = vml->vml_iomap;
        ext->ext_vm86.vm86_intmap = vml->vml_intmap;

        if (cpu_feature & CPUID_VME)
                ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

        addr = (u_int *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
                *addr++ = 0;
        vml->vml_iomap_trailer = 0xff;

        ssd.ssd_base = (u_int)&ext->ext_tss;
        ssd.ssd_limit = TSS_SIZE - 1;
        ssdtosd(&ssd, &ext->ext_tssd);

        vm86pcb = pcb;

#if 0
        /*
         * use whatever is leftover of the vm86 page layout as a
         * message buffer so we can capture early output.
         */
        msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
            ctob(3) - sizeof(struct vm86_layout));
#endif
}
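
/*
 * Return the kernel va that shadows vm86 page `pagenum' in this
 * context, or 0 if the page has not been added.
 */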

vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        return (vmc->pmap[i].kva);
        return (0);
}
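
/*
 * Register a kernel va to back vm86 page `pagenum'; if kva is 0 a
 * scratch page is kmalloc'd.  The context is later spliced into the
 * vm86 page table by vm86_datacall().
 */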

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
        int i, flags = 0;

        for (i = 0; i < vmc->npages; i++)
                if (vmc->pmap[i].pte_num == pagenum)
                        goto bad;

        if (vmc->npages == VM86_PMAPSIZE)
                goto bad;                       /* XXX grow map? */

        if (kva == 0) {
                kva = (vm_offset_t)kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
                flags = VMAP_MALLOC;
        }

        i = vmc->npages++;
        vmc->pmap[i].flags = flags;
        vmc->pmap[i].kva = kva;
        vmc->pmap[i].pte_num = pagenum;
        return (kva);
bad:
        panic("vm86_addpage: not enough room, or overlap");
}
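
/*
 * Compute the initial eflags image for a vm86 call: only the
 * user-changeable flag bits are taken from the caller and PSL_VM is
 * forced on.  Without hardware VME support, VIF/VIP are kept in the
 * vm86 kernel area so vm86_emulate() can maintain them in software.
 */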

static void
vm86_initflags(struct vm86frame *vmf)
{
        int eflags = vmf->vmf_eflags;
        struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

        if (vm86->vm86_has_vme) {
                eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
                    (eflags & VME_USERCHANGE) | PSL_VM;
        } else {
                vm86->vm86_eflags = eflags;     /* save VIF, VIP */
                eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
                    (eflags & VM_USERCHANGE) | PSL_VM;
        }
        vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame *vmf)
{
        uintptr_t addr[] = { 0xA00, 0x1000 };           /* code, stack */
        u_char intcall[] = {
                CLI, INTn, 0x00, STI, HLT
        };

        if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
                /* interrupt call requested */
                intcall[2] = (u_char)(vmf->vmf_trapno & 0xff);
                memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
                vmf->vmf_ip = addr[0];
                vmf->vmf_cs = 0;
        }
        vmf->vmf_sp = addr[1] - 2;              /* keep aligned */
        vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = vmf->kernel_gs = 0;
        vmf->vmf_ss = 0;
        vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
        vm86_initflags(vmf);
}

/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * An MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf, int have_mplock)
{
        caddr_t addr;

        /* "should not happen" */
        if ((vmf->vmf_eflags & PSL_VM) == 0)
                panic("vm86_trap called, but not in vm86 mode");

        addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
        if (*(u_char *)addr == HLT)
                vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
        else
                vmf->vmf_trapno = vmf->vmf_trapno << 16;

        if (have_mplock)
                rel_mplock();
        vm86_biosret(vmf);
}
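
/*
 * Execute a BIOS/real-mode software interrupt from the kernel via the
 * trampoline set up in vm86_prepcall().  A caller typically zeroes a
 * vm86frame, loads the registers the BIOS service expects, and checks
 * the returned frame.  Roughly (a hypothetical sketch; the register
 * values depend entirely on the BIOS service being invoked):
 *
 *      struct vm86frame vmf;
 *
 *      bzero(&vmf, sizeof(vmf));
 *      vmf.vmf_eax = 0x0f00;           // hypothetical video BIOS query
 *      if (vm86_intcall(0x10, &vmf) == 0)
 *              kprintf("int 10h returned eax=%#x\n", vmf.vmf_eax);
 */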

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
        int error;

        if (intnum < 0 || intnum > 0xff)
                return (EINVAL);

        crit_enter();
        ASSERT_MP_LOCK_HELD(curthread);

        vm86_setup_timer_fault();
        vmf->vmf_trapno = intnum;
        error = vm86_bioscall(vmf);

        /*
         * Yes, this happens, especially with video BIOS calls.  The BIOS
         * will sometimes eat timer 2 for lunch, and we need timer 2.
         */
        if (vm86_blew_up_timer) {
                vm86_blew_up_timer = 0;
                timer_restore();
                if (timer_warn) {
                        kprintf("Warning: BIOS played with the 8254, "
                                "resetting it\n");
                }
        }
        crit_exit();
        return (error);
}

/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
        pt_entry_t *pte = vm86paddr;
        u_int page;
        int i, entry, retval;

        crit_enter();
        ASSERT_MP_LOCK_HELD(curthread);

        for (i = 0; i < vmc->npages; i++) {
                page = vtophys(vmc->pmap[i].kva & PG_FRAME);
                entry = vmc->pmap[i].pte_num;
                vmc->pmap[i].old_pte = pte[entry];
                pte[entry] = page | PG_V | PG_RW | PG_U;
        }

        vmf->vmf_trapno = intnum;
        retval = vm86_bioscall(vmf);

        for (i = 0; i < vmc->npages; i++) {
                entry = vmc->pmap[i].pte_num;
                pte[entry] = vmc->pmap[i].old_pte;
        }
        crit_exit();
        return (retval);
}
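
/*
 * Translate a real-mode sel:off pair into the kernel va registered for
 * that page in the context, or 0 if the page is not mapped there.
 */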

vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
        int i, page;
        vm_offset_t addr;

        addr = (vm_offset_t)MAKE_ADDR(sel, off);
        page = addr >> PAGE_SHIFT;
        for (i = 0; i < vmc->npages; i++)
                if (page == vmc->pmap[i].pte_num)
                        return (vmc->pmap[i].kva + (addr & PAGE_MASK));
        return (0);
}
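
/*
 * The reverse translation: find the vm86 sel:off corresponding to a
 * kernel va in the context.  Returns 1 on success, 0 if the va is not
 * covered.  (The selector is pte_num << 8 because segments are
 * paragraph, i.e. 16-byte, granular: page << 12 == sel << 4.)
 */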

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
            u_short *off)
{
        int i;

        for (i = 0; i < vmc->npages; i++)
                if (kva >= vmc->pmap[i].kva &&
                    kva < vmc->pmap[i].kva + PAGE_SIZE) {
                        *off = kva - vmc->pmap[i].kva;
                        *sel = vmc->pmap[i].pte_num << 8;
                        return (1);
                }
        return (0);
}
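
/*
 * sysarch(2) backend for the vm86 sub-commands: initialize the lwp's
 * vm86 state (VM86_INIT), query VME availability (VM86_GET_VME), or
 * make a BIOS interrupt call (VM86_INTCALL, PRIV_ROOT only).
 */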

int
vm86_sysarch(struct lwp *lp, char *args)
{
        int error = 0;
        struct i386_vm86_args ua;
        struct vm86_kernel *vm86;

        if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
                return (error);

        if (lp->lwp_thread->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(lp)) != 0)
                        return (error);
        vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

        switch (ua.sub_op) {
        case VM86_INIT: {
                struct vm86_init_args sa;

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (cpu_feature & CPUID_VME)
                        vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
                else
                        vm86->vm86_has_vme = 0;
                vm86->vm86_inited = 1;
                vm86->vm86_debug = sa.debug;
                bcopy(&sa.int_map, vm86->vm86_intmap, 32);
                }
                break;

#if 0
        case VM86_SET_VME: {
                struct vm86_vme_args sa;

                if ((cpu_feature & CPUID_VME) == 0)
                        return (ENODEV);

                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
                        return (error);
                if (sa.state)
                        load_cr4(rcr4() | CR4_VME);
                else
                        load_cr4(rcr4() & ~CR4_VME);
                }
                break;
#endif

        case VM86_GET_VME: {
                struct vm86_vme_args sa;

                sa.state = (rcr4() & CR4_VME ? 1 : 0);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        case VM86_INTCALL: {
                struct vm86_intcall_args sa;

                if ((error = priv_check_cred(lp->lwp_proc->p_ucred, PRIV_ROOT, 0)))
                        return (error);
                if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
                        return (error);
                if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
                        return (error);
                error = copyout(&sa, ua.sub_args, sizeof(sa));
                }
                break;

        default:
                error = EINVAL;
        }
        return (error);
}

/*
 * Setup the VM86 I/O map to take faults on the timer
 */
static void
vm86_setup_timer_fault(void)
{
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;

        vml->vml_iomap[TIMER_MODE >> 3] |= 1 << (TIMER_MODE & 7);
        vml->vml_iomap[TIMER_CNTR0 >> 3] |= 1 << (TIMER_CNTR0 & 7);
        vml->vml_iomap[TIMER_CNTR1 >> 3] |= 1 << (TIMER_CNTR1 & 7);
        vml->vml_iomap[TIMER_CNTR2 >> 3] |= 1 << (TIMER_CNTR2 & 7);
}

/*
 * Setup the VM86 I/O map to not fault on the timer
 */
static void
vm86_clear_timer_fault(void)
{
        struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;

        vml->vml_iomap[TIMER_MODE >> 3] &= ~(1 << (TIMER_MODE & 7));
        vml->vml_iomap[TIMER_CNTR0 >> 3] &= ~(1 << (TIMER_CNTR0 & 7));
        vml->vml_iomap[TIMER_CNTR1 >> 3] &= ~(1 << (TIMER_CNTR1 & 7));
        vml->vml_iomap[TIMER_CNTR2 >> 3] &= ~(1 << (TIMER_CNTR2 & 7));
}