/*-
 * Copyright (c) 1997 Jonathan Lemon
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/vm86.c,v 1.31.2.2 2001/10/05 06:18:55 peter Exp $
 * $DragonFly: src/sys/platform/pc32/i386/vm86.c,v 1.25 2007/01/22 19:37:04 corecode Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <sys/user.h>
#include <sys/thread2.h>

#include <machine/md_var.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#include <machine/psl.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <machine/clock.h>
#include <bus/isa/i386/isa.h>
#include <bus/isa/rtc.h>
#include <machine_base/isa/timerreg.h>
extern int i386_extend_pcb(struct lwp *);
extern int vm86pa;
extern struct pcb *vm86pcb;

extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);

#define PGTABLE_SIZE	((1024 + 64) * 1024 / PAGE_SIZE)
#define INTMAP_SIZE	32
#define IOMAP_SIZE	ctob(IOPAGES)
#define TSS_SIZE \
	(sizeof(struct pcb_ext) - sizeof(struct segment_descriptor) + \
	 INTMAP_SIZE + IOMAP_SIZE + 1)
struct vm86_layout {
	pt_entry_t	vml_pgtbl[PGTABLE_SIZE];
	struct pcb	vml_pcb;
	struct pcb_ext	vml_ext;
	char		vml_intmap[INTMAP_SIZE];
	char		vml_iomap[IOMAP_SIZE];
	char		vml_iomap_trailer;
};

void vm86_prepcall(struct vm86frame *);

struct system_map {
	int		type;
	vm_offset_t	start;
	vm_offset_t	end;
};
#define	HLT	0xf4
#define	CLI	0xfa
#define	STI	0xfb
#define	PUSHF	0x9c
#define	POPF	0x9d
#define	INTn	0xcd
#define	IRET	0xcf
#define	INB	0xe4
#define	INW	0xe5
#define	INBDX	0xec
#define	INWDX	0xed
#define	OUTB	0xe6
#define	OUTW	0xe7
#define	OUTBDX	0xee
#define	OUTWDX	0xef
#define	CALLm	0xff
#define OPERAND_SIZE_PREFIX	0x66
#define ADDRESS_SIZE_PREFIX	0x67
#define PUSH_MASK	~(PSL_VM | PSL_RF | PSL_I)
#define POP_MASK	~(PSL_VIP | PSL_VIF | PSL_VM | PSL_RF | PSL_IOPL)

static void vm86_setup_timer_fault(void);
static void vm86_clear_timer_fault(void);

static int vm86_blew_up_timer;

static int timer_warn = 1;
SYSCTL_INT(_debug, OID_AUTO, timer_warn, CTLFLAG_RW, &timer_warn, 0, "");
static __inline caddr_t
MAKE_ADDR(u_short sel, u_short off)
{
	return ((caddr_t)((sel << 4) + off));
}

static __inline void
GET_VEC(u_int vec, u_short *sel, u_short *off)
{
	*sel = vec >> 16;
	*off = vec & 0xffff;
}

static __inline u_int
MAKE_VEC(u_short sel, u_short off)
{
	return ((sel << 16) | off);
}
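
/*
 * Worked example (illustrative only): real-mode addresses are formed as
 * (segment << 4) + offset, so MAKE_ADDR(0xc000, 0x0010) yields linear
 * address 0xc0010, while GET_VEC(0xc0000010, &sel, &off) splits a packed
 * interrupt vector into sel = 0xc000, off = 0x0010.
 */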
static __inline void
PUSH(u_short x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 2;
	susword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline void
PUSHL(u_int x, struct vm86frame *vmf)
{
	vmf->vmf_sp -= 4;
	suword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp), x);
}

static __inline u_short
POP(struct vm86frame *vmf)
{
	u_short x = fusword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 2;
	return (x);
}

static __inline u_int
POPL(struct vm86frame *vmf)
{
	u_int x = fuword(MAKE_ADDR(vmf->vmf_ss, vmf->vmf_sp));

	vmf->vmf_sp += 4;
	return (x);
}
/*
 * MPSAFE
 */
int
vm86_emulate(struct vm86frame *vmf)
{
	struct vm86_kernel *vm86;
	caddr_t addr;
	u_char i_byte;
	u_int temp_flags;
	int inc_ip = 1;
	int retcode = 0;

	/*
	 * pcb_ext contains the address of the extension area, or zero if
	 * the extension is not present.  (This check should not be needed,
	 * as we can't enter vm86 mode until we set up an extension area)
	 */
	if (curthread->td_pcb->pcb_ext == 0)
		return (SIGBUS);
	vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

	if (vmf->vmf_eflags & PSL_T)
		retcode = SIGTRAP;

	/*
	 * Instruction emulation
	 */
	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	i_byte = fubyte(addr);
	if (i_byte == ADDRESS_SIZE_PREFIX) {
		i_byte = fubyte(++addr);
		inc_ip++;
	}

	/*
	 * I/O emulation (TIMER only, a big hack).  Just reenable the
	 * IO bits involved, flag it, and retry the instruction.
	 */
	switch (i_byte) {
	case OUTB:
	case OUTW:
	case OUTBDX:
	case OUTWDX:
		vm86_blew_up_timer = 1;
		/* fall through */
	case INB:
	case INW:
	case INBDX:
	case INWDX:
		vm86_clear_timer_fault();
		/* retry insn */
		return (0);
	}

	if (vm86->vm86_has_vme) {
		switch (i_byte) {
		case OPERAND_SIZE_PREFIX:
			i_byte = fubyte(++addr);
			inc_ip++;
			switch (i_byte) {
			case PUSHF:
				if (vmf->vmf_eflags & PSL_VIF)
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL | PSL_I, vmf);
				else
					PUSHL((vmf->vmf_eflags & PUSH_MASK)
					    | PSL_IOPL, vmf);
				vmf->vmf_ip += inc_ip;
				return (0);

			case POPF:
				temp_flags = POPL(vmf) & POP_MASK;
				vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
				    | temp_flags | PSL_VM | PSL_I;
				vmf->vmf_ip += inc_ip;
				if (temp_flags & PSL_I) {
					vmf->vmf_eflags |= PSL_VIF;
					if (vmf->vmf_eflags & PSL_VIP)
						break;
				} else {
					vmf->vmf_eflags &= ~PSL_VIF;
				}
				return (0);
			}
			break;

		/* VME faults here if VIP is set, but does not set VIF. */
		case STI:
			vmf->vmf_eflags |= PSL_VIF;
			vmf->vmf_ip += inc_ip;
			if ((vmf->vmf_eflags & PSL_VIP) == 0) {
				uprintf("fatal sti\n");
				return (SIGKILL);
			}
			break;

		/* VME if no redirection support */
		case INTn:
			break;

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
		case POPF:
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);

		/* VME if trying to set PSL_TF, or PSL_I when VIP is set */
		case IRET:
			vmf->vmf_ip = POP(vmf);
			vmf->vmf_cs = POP(vmf);
			temp_flags = POP(vmf) & POP_MASK;
			vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			if (temp_flags & PSL_I) {
				vmf->vmf_eflags |= PSL_VIF;
				if (vmf->vmf_eflags & PSL_VIP)
					break;
			} else {
				vmf->vmf_eflags &= ~PSL_VIF;
			}
			return (retcode);
		}
		return (SIGBUS);
	}
	switch (i_byte) {
	case OPERAND_SIZE_PREFIX:
		i_byte = fubyte(++addr);
		inc_ip++;
		switch (i_byte) {
		case PUSHF:
			if (vm86->vm86_eflags & PSL_VIF)
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL | PSL_I, vmf);
			else
				PUSHL((vmf->vmf_flags & PUSH_MASK)
				    | PSL_IOPL, vmf);
			vmf->vmf_ip += inc_ip;
			return (retcode);

		case POPF:
			temp_flags = POPL(vmf) & POP_MASK;
			vmf->vmf_eflags = (vmf->vmf_eflags & ~POP_MASK)
			    | temp_flags | PSL_VM | PSL_I;
			vmf->vmf_ip += inc_ip;
			if (temp_flags & PSL_I) {
				vm86->vm86_eflags |= PSL_VIF;
				if (vm86->vm86_eflags & PSL_VIP)
					break;
			} else {
				vm86->vm86_eflags &= ~PSL_VIF;
			}
			return (retcode);
		}
		return (SIGBUS);

	case CLI:
		vm86->vm86_eflags &= ~PSL_VIF;
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case STI:
		/* if there is a pending interrupt, go to the emulator */
		vm86->vm86_eflags |= PSL_VIF;
		vmf->vmf_ip += inc_ip;
		if (vm86->vm86_eflags & PSL_VIP)
			break;
		return (retcode);

	case PUSHF:
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		vmf->vmf_ip += inc_ip;
		return (retcode);

	case INTn:
		i_byte = fubyte(addr + 1);
		if ((vm86->vm86_intmap[i_byte >> 3] & (1 << (i_byte & 7))) != 0)
			break;
		if (vm86->vm86_eflags & PSL_VIF)
			PUSH((vmf->vmf_flags & PUSH_MASK)
			    | PSL_IOPL | PSL_I, vmf);
		else
			PUSH((vmf->vmf_flags & PUSH_MASK) | PSL_IOPL, vmf);
		PUSH(vmf->vmf_cs, vmf);
		PUSH(vmf->vmf_ip + inc_ip + 1, vmf);	/* increment IP */
		GET_VEC(fuword((caddr_t)(i_byte * 4)),
		    &vmf->vmf_cs, &vmf->vmf_ip);
		vmf->vmf_flags &= ~PSL_T;
		vm86->vm86_eflags &= ~PSL_VIF;
		return (retcode);

	case IRET:
		vmf->vmf_ip = POP(vmf);
		vmf->vmf_cs = POP(vmf);
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);

	case POPF:
		temp_flags = POP(vmf) & POP_MASK;
		vmf->vmf_flags = (vmf->vmf_flags & ~POP_MASK)
		    | temp_flags | PSL_VM | PSL_I;
		vmf->vmf_ip += inc_ip;
		if (temp_flags & PSL_I) {
			vm86->vm86_eflags |= PSL_VIF;
			if (vm86->vm86_eflags & PSL_VIP)
				break;
		} else {
			vm86->vm86_eflags &= ~PSL_VIF;
		}
		return (retcode);
	}
	return (SIGBUS);
}
void
vm86_initialize(void)
{
	int i;
	u_int *addr;
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;
	struct pcb *pcb;
	struct pcb_ext *ext;
	struct soft_segment_descriptor ssd = {
		0,			/* segment base address (overwritten) */
		0,			/* length (overwritten) */
		SDT_SYS386TSS,		/* segment type */
		0,			/* priority level */
		1,			/* descriptor present */
		0, 0,
		0,			/* default 16 size */
		0			/* granularity */
	};

	/*
	 * this should be a compile time error, but cpp doesn't grok sizeof().
	 */
	if (sizeof(struct vm86_layout) > ctob(3))
		panic("struct vm86_layout exceeds space allocated in locore.s");
	/*
	 * Below is the memory layout that we use for the vm86 region.
	 *
	 * +--------+
	 * |        |
	 * |        |
	 * | page 0 |
	 * |        |   +--------+
	 * |        |   | stack  |
	 * +--------+   +--------+ <--------- vm86paddr
	 * |        |   |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
	 * |        |   +--------+
	 * |        |   |  PCB   | size: ~240 bytes
	 * | page 1 |   |PCB Ext | size: ~140 bytes (includes TSS)
	 * |        |   +--------+
	 * |        |   |int map |
	 * |        |   +--------+
	 * +--------+   |        |
	 * | page 2 |   |  I/O   |
	 * +--------+   | bitmap |
	 * | page 3 |   |        |
	 * |        |   +--------+
	 * +--------+
	 */
	/*
	 * A rudimentary PCB must be installed, in order to get to the
	 * PCB extension area.  We use the PCB area as a scratchpad for
	 * data storage, the layout of which is shown below.
	 *
	 * pcb_esi	= new PTD entry 0
	 * pcb_ebp	= pointer to frame on vm86 stack
	 * pcb_esp	= stack frame pointer at time of switch
	 * pcb_ebx	= va of vm86 page table
	 * pcb_eip	= argument pointer to initial call
	 * pcb_spare[0]	= saved TSS descriptor, word 0
	 * pcb_spare[1]	= saved TSS descriptor, word 1
	 */
#define new_ptd		pcb_esi
#define vm86_frame	pcb_ebp
#define pgtable_va	pcb_ebx
	pcb = &vml->vml_pcb;
	ext = &vml->vml_ext;

	bzero(pcb, sizeof(struct pcb));
	pcb->new_ptd = vm86pa | PG_V | PG_RW | PG_U;
	pcb->vm86_frame = (pt_entry_t)vm86paddr - sizeof(struct vm86frame);
	pcb->pgtable_va = (vm_offset_t)vm86paddr;
	pcb->pcb_ext = ext;

	bzero(ext, sizeof(struct pcb_ext));
	ext->ext_tss.tss_esp0 = (vm_offset_t)vm86paddr;
	ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	ext->ext_tss.tss_ioopt =
		((u_int)vml->vml_iomap - (u_int)&ext->ext_tss) << 16;
	ext->ext_iomap = vml->vml_iomap;
	ext->ext_vm86.vm86_intmap = vml->vml_intmap;

	if (cpu_feature & CPUID_VME)
		ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);

	addr = (u_int *)ext->ext_vm86.vm86_intmap;
	for (i = 0; i < (INTMAP_SIZE + IOMAP_SIZE) / sizeof(u_int); i++)
		*addr++ = 0;
	vml->vml_iomap_trailer = 0xff;

	ssd.ssd_base = (u_int)&ext->ext_tss;
	ssd.ssd_limit = TSS_SIZE - 1;
	ssdtosd(&ssd, &ext->ext_tssd);

	vm86pcb = pcb;

#if 0
	/*
	 * use whatever is leftover of the vm86 page layout as a
	 * message buffer so we can capture early output.
	 */
	msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
	    ctob(3) - sizeof(struct vm86_layout));
#endif
}
vm_offset_t
vm86_getpage(struct vm86context *vmc, int pagenum)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			return (vmc->pmap[i].kva);
	return (0);
}

vm_offset_t
vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
{
	int i, flags = 0;

	for (i = 0; i < vmc->npages; i++)
		if (vmc->pmap[i].pte_num == pagenum)
			goto bad;

	if (vmc->npages == VM86_PMAPSIZE)
		goto bad;			/* XXX grow map? */

	if (kva == 0) {
		kva = (vm_offset_t)kmalloc(PAGE_SIZE, M_TEMP, M_WAITOK);
		flags = VMAP_MALLOC;
	}

	i = vmc->npages++;
	vmc->pmap[i].flags = flags;
	vmc->pmap[i].kva = kva;
	vmc->pmap[i].pte_num = pagenum;
	return (kva);
bad:
	panic("vm86_addpage: not enough room, or overlap");
}
static void
vm86_initflags(struct vm86frame *vmf)
{
	int eflags = vmf->vmf_eflags;
	struct vm86_kernel *vm86 = &curthread->td_pcb->pcb_ext->ext_vm86;

	if (vm86->vm86_has_vme) {
		eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
		    (eflags & VME_USERCHANGE) | PSL_VM;
	} else {
		vm86->vm86_eflags = eflags;	/* save VIF, VIP */
		eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
		    (eflags & VM_USERCHANGE) | PSL_VM;
	}
	vmf->vmf_eflags = eflags | PSL_VM;
}

/*
 * called from vm86_bioscall, while in vm86 address space, to finalize setup.
 */
void
vm86_prepcall(struct vm86frame *vmf)
{
	uintptr_t addr[] = { 0xA00, 0x1000 };	/* code, stack */
	u_char intcall[] = {
		CLI, INTn, 0x00, STI, HLT
	};

	if ((vmf->vmf_trapno & PAGE_MASK) <= 0xff) {
		/* interrupt call requested */
		intcall[2] = (u_char)(vmf->vmf_trapno & 0xff);
		memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
		vmf->vmf_ip = addr[0];
		vmf->vmf_cs = 0;
	}
	vmf->vmf_sp = addr[1] - 2;		/* keep aligned */
	vmf->kernel_fs = vmf->kernel_es = vmf->kernel_ds = vmf->kernel_gs = 0;
	vmf->vmf_ss = 0;
	vmf->vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
	vm86_initflags(vmf);
}
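
/*
 * Note on the trampoline above: the five bytes copied to 0xA00 are
 * "cli; int NN; sti; hlt".  When the vm86 code reaches the HLT,
 * vm86_trap() below recognizes it and reports the carry flag as the
 * BIOS call's success/failure indication.
 */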
/*
 * vm86 trap handler; determines whether routine succeeded or not.
 * Called while in vm86 space, returns to calling process.
 *
 * A MP lock ref is held on entry from trap() and must be released prior
 * to returning to the VM86 call.
 */
void
vm86_trap(struct vm86frame *vmf, int have_mplock)
{
	caddr_t addr;

	/* "should not happen" */
	if ((vmf->vmf_eflags & PSL_VM) == 0)
		panic("vm86_trap called, but not in vm86 mode");

	addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
	if (*(u_char *)addr == HLT)
		vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
	else
		vmf->vmf_trapno = vmf->vmf_trapno << 16;

	if (have_mplock)
		rel_mplock();
	vm86_biosret(vmf);
}

int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
	int error;

	if (intnum < 0 || intnum > 0xff)
		return (EINVAL);

	crit_enter();
	ASSERT_MP_LOCK_HELD(curthread);

	vm86_setup_timer_fault();
	vmf->vmf_trapno = intnum;
	error = vm86_bioscall(vmf);

	/*
	 * Yes, this happens, especially with video BIOS calls.  The BIOS
	 * will sometimes eat timer 2 for lunch, and we need timer 2.
	 */
	if (vm86_blew_up_timer) {
		vm86_blew_up_timer = 0;
		timer_restore();
		if (timer_warn) {
			kprintf("Warning: BIOS played with the 8254, "
				"resetting it\n");
		}
	}
	crit_exit();
	return (error);
}
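
/*
 * Hypothetical usage sketch (not part of the original file): query the
 * current video mode via int 10h with ah = 0x0f.  General registers
 * live in the embedded trapframe (vmf_eax and friends), and the caller
 * must hold the MP lock, per the assertion above.
 *
 *	struct vm86frame vmf;
 *
 *	bzero(&vmf, sizeof(vmf));
 *	vmf.vmf_eax = 0x0f00;
 *	if (vm86_intcall(0x10, &vmf) == 0)
 *		kprintf("video mode %#x\n", vmf.vmf_eax & 0xff);
 */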
/*
 * struct vm86context contains the page table to use when making
 * vm86 calls.  If intnum is a valid interrupt number (0-255), then
 * the "interrupt trampoline" will be used, otherwise we use the
 * caller's cs:ip routine.
 */
int
vm86_datacall(int intnum, struct vm86frame *vmf, struct vm86context *vmc)
{
	pt_entry_t *pte = vm86paddr;
	u_int page;
	int i, entry, retval;

	crit_enter();
	ASSERT_MP_LOCK_HELD(curthread);

	for (i = 0; i < vmc->npages; i++) {
		page = vtophys(vmc->pmap[i].kva & PG_FRAME);
		entry = vmc->pmap[i].pte_num;
		vmc->pmap[i].old_pte = pte[entry];
		pte[entry] = page | PG_V | PG_RW | PG_U;
	}

	vmf->vmf_trapno = intnum;
	retval = vm86_bioscall(vmf);

	for (i = 0; i < vmc->npages; i++) {
		entry = vmc->pmap[i].pte_num;
		pte[entry] = vmc->pmap[i].old_pte;
	}
	crit_exit();
	return (retval);
}
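
/*
 * Hypothetical usage sketch (VESA-style, not part of the original file):
 * hand the BIOS a buffer that is visible inside the vm86 address space.
 * vm86_getptr() below converts the buffer's kernel va back into a
 * sel:off pair for the BIOS to consume; vmf_es/vmf_di are assumed to be
 * the u_short register fields of struct vm86frame.
 *
 *	struct vm86context vmc;
 *	struct vm86frame vmf;
 *	vm_offset_t buf;
 *
 *	vmc.npages = 0;
 *	buf = vm86_addpage(&vmc, 1, 0);
 *	bzero(&vmf, sizeof(vmf));
 *	vm86_getptr(&vmc, buf, &vmf.vmf_es, &vmf.vmf_di);
 *	vmf.vmf_eax = 0x4f00;
 *	vm86_datacall(0x10, &vmf, &vmc);
 */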
vm_offset_t
vm86_getaddr(struct vm86context *vmc, u_short sel, u_short off)
{
	int i, page;
	vm_offset_t addr;

	addr = (vm_offset_t)MAKE_ADDR(sel, off);
	page = addr >> PAGE_SHIFT;
	for (i = 0; i < vmc->npages; i++)
		if (page == vmc->pmap[i].pte_num)
			return (vmc->pmap[i].kva + (addr & PAGE_MASK));
	return (0);
}

int
vm86_getptr(struct vm86context *vmc, vm_offset_t kva, u_short *sel,
	    u_short *off)
{
	int i;

	for (i = 0; i < vmc->npages; i++)
		if (kva >= vmc->pmap[i].kva &&
		    kva < vmc->pmap[i].kva + PAGE_SIZE) {
			*off = kva - vmc->pmap[i].kva;
			*sel = vmc->pmap[i].pte_num << 8;
			return (1);
		}
	return (0);
}
int
vm86_sysarch(struct lwp *lp, char *args)
{
	int error = 0;
	struct i386_vm86_args ua;
	struct vm86_kernel *vm86;

	if ((error = copyin(args, &ua, sizeof(struct i386_vm86_args))) != 0)
		return (error);

	if (lp->lwp_thread->td_pcb->pcb_ext == 0)
		if ((error = i386_extend_pcb(lp)) != 0)
			return (error);
	vm86 = &lp->lwp_thread->td_pcb->pcb_ext->ext_vm86;

	switch (ua.sub_op) {
	case VM86_INIT: {
		struct vm86_init_args sa;

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (cpu_feature & CPUID_VME)
			vm86->vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
		else
			vm86->vm86_has_vme = 0;
		vm86->vm86_inited = 1;
		vm86->vm86_debug = sa.debug;
		bcopy(&sa.int_map, vm86->vm86_intmap, 32);
		}
		break;

#if 0
	case VM86_SET_VME: {
		struct vm86_vme_args sa;

		if ((cpu_feature & CPUID_VME) == 0)
			return (ENODEV);

		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))) != 0)
			return (error);
		if (sa.state)
			load_cr4(rcr4() | CR4_VME);
		else
			load_cr4(rcr4() & ~CR4_VME);
		}
		break;
#endif

	case VM86_GET_VME: {
		struct vm86_vme_args sa;

		sa.state = (rcr4() & CR4_VME ? 1 : 0);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	case VM86_INTCALL: {
		struct vm86_intcall_args sa;

		if ((error = suser_cred(lp->lwp_proc->p_ucred, 0)))
			return (error);
		if ((error = copyin(ua.sub_args, &sa, sizeof(sa))))
			return (error);
		if ((error = vm86_intcall(sa.intnum, &sa.vmf)))
			return (error);
		error = copyout(&sa, ua.sub_args, sizeof(sa));
		}
		break;

	default:
		error = EINVAL;
	}
	return (error);
}
/*
 * Setup the VM86 I/O map to take faults on the timer
 */
static void
vm86_setup_timer_fault(void)
{
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;

	vml->vml_iomap[TIMER_MODE >> 3] |= 1 << (TIMER_MODE & 7);
	vml->vml_iomap[TIMER_CNTR0 >> 3] |= 1 << (TIMER_CNTR0 & 7);
	vml->vml_iomap[TIMER_CNTR1 >> 3] |= 1 << (TIMER_CNTR1 & 7);
	vml->vml_iomap[TIMER_CNTR2 >> 3] |= 1 << (TIMER_CNTR2 & 7);
}
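
/*
 * Worked example (assuming the 8254 sits at the standard ISA base, so
 * TIMER_MODE is port 0x43): 0x43 >> 3 == 8 and 0x43 & 7 == 3, so the
 * code above sets bit 3 of iomap byte 8, and any IN/OUT on port 0x43
 * from vm86 mode now faults into vm86_emulate().
 */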
/*
 * Setup the VM86 I/O map to not fault on the timer
 */
static void
vm86_clear_timer_fault(void)
{
	struct vm86_layout *vml = (struct vm86_layout *)vm86paddr;

	vml->vml_iomap[TIMER_MODE >> 3] &= ~(1 << (TIMER_MODE & 7));
	vml->vml_iomap[TIMER_CNTR0 >> 3] &= ~(1 << (TIMER_CNTR0 & 7));
	vml->vml_iomap[TIMER_CNTR1 >> 3] &= ~(1 << (TIMER_CNTR1 & 7));
	vml->vml_iomap[TIMER_CNTR2 >> 3] &= ~(1 << (TIMER_CNTR2 & 7));
}