Map the VRAM and Video ROM/System ROM separately.
[v86d.git] / libs / lrmi-0.10 / lrmi.c
/*
Linux Real Mode Interface - A library of DPMI-like functions for Linux.

Copyright (C) 1998 by Josh Vanderhoof

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL JOSH VANDERHOOF BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*/

#if defined(__i386__) && (defined(__linux__) || defined(__NetBSD__) \
    || defined(__FreeBSD__) || defined(__OpenBSD__))
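
/*
 * Illustrative use of this library (sketch only, not taken from the
 * original source): initialize once, then issue real-mode BIOS interrupts
 * through LRMI_int(), with any buffer the BIOS writes to allocated from
 * conventional memory.  A VBE "get controller info" call
 * (int 0x10, ax = 0x4f00) would look roughly like:
 *
 *	struct LRMI_regs r;
 *	void *buf;
 *
 *	if (!LRMI_init())
 *		return;
 *	buf = LRMI_alloc_real(512);
 *	memset(&r, 0, sizeof(r));
 *	r.eax = 0x4f00;
 *	r.es  = (unsigned int)buf >> 4;
 *	r.edi = (unsigned int)buf & 0xf;
 *	if (LRMI_int(0x10, &r) && (r.eax & 0xffff) == 0x004f)
 *		... the info block is now in *buf ...
 *	LRMI_free_real(buf);
 */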

#include <stdio.h>
#include <string.h>

#if defined(__linux__)

#include <asm/vm86.h>
#include <signal.h>

#ifdef USE_LIBC_VM86
#include <sys/vm86.h>
#endif

#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)

#include <sys/param.h>
#include <signal.h>
#include <setjmp.h>
#include <machine/psl.h>
#include <machine/vm86.h>
#include <machine/sysarch.h>

#endif /* __NetBSD__ || __FreeBSD__ || __OpenBSD__ */

#if defined(__FreeBSD__)
#include <sys/ucontext.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>

#include "lrmi.h"

/* 2.6.26+ kernels don't define the legacy masks. */
#if defined(__linux__) && !defined(TF_MASK)
#define TF_MASK		X86_EFLAGS_TF
#define IF_MASK		X86_EFLAGS_IF
#define VIF_MASK	X86_EFLAGS_VIF
#define IOPL_MASK	X86_EFLAGS_IOPL
#endif
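
/*
 * Conventional-memory heap: a 256 KB window at 0x10000-0x4ffff is mapped
 * in real_mem_init() and handed out to callers by the simple first-fit
 * block allocator below (LRMI_alloc_real/LRMI_free_real).
 */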
#define REAL_MEM_BASE	((void *)0x10000)
#define REAL_MEM_SIZE	0x40000
#define REAL_MEM_BLOCKS	0x100

struct mem_block {
    unsigned int size : 20;
    unsigned int free : 1;
};

static struct {
    int ready;
    int count;
    struct mem_block blocks[REAL_MEM_BLOCKS];
} mem_info = { 0 };
static int
read_file(char *name, void *p, size_t n)
{
    int fd;

    fd = open(name, O_RDONLY);

    if (fd == -1) {
        perror("open");
        return 0;
    }

    if (read(fd, p, n) != n) {
        perror("read");
        close(fd);
        return 0;
    }

    close(fd);

    return 1;
}

static int
map_file(void *start, size_t length, int prot, int flags, char *name, long offset)
{
    void *m;
    int fd;

    fd = open(name, (flags & MAP_SHARED) ? O_RDWR : O_RDONLY);

    if (fd == -1) {
        perror("open");
        return 0;
    }

    m = mmap(start, length, prot, flags, fd, offset);

    if (m == (void *)-1) {
        perror("mmap");
        close(fd);
        return 0;
    }

    close(fd);
    return 1;
}
static int
real_mem_init(void)
{
    if (mem_info.ready)
        return 1;

    if (!map_file((void *)REAL_MEM_BASE, REAL_MEM_SIZE,
        PROT_READ | PROT_WRITE | PROT_EXEC,
        MAP_FIXED | MAP_PRIVATE, "/dev/zero", 0))
        return 0;

    mem_info.ready = 1;
    mem_info.count = 1;
    mem_info.blocks[0].size = REAL_MEM_SIZE;
    mem_info.blocks[0].free = 1;

    return 1;
}

static void
real_mem_deinit(void)
{
    if (mem_info.ready) {
        munmap((void *)REAL_MEM_BASE, REAL_MEM_SIZE);
        mem_info.ready = 0;
    }
}

static void
insert_block(int i)
{
    memmove(
        mem_info.blocks + i + 1,
        mem_info.blocks + i,
        (mem_info.count - i) * sizeof(struct mem_block));

    mem_info.count++;
}

static void
delete_block(int i)
{
    mem_info.count--;

    memmove(
        mem_info.blocks + i,
        mem_info.blocks + i + 1,
        (mem_info.count - i) * sizeof(struct mem_block));
}
void *
LRMI_alloc_real(int size)
{
    int i;
    char *r = (char *)REAL_MEM_BASE;

    if (!mem_info.ready)
        return NULL;

    if (mem_info.count == REAL_MEM_BLOCKS)
        return NULL;

    size = (size + 15) & ~15;

    for (i = 0; i < mem_info.count; i++) {
        if (mem_info.blocks[i].free && size < mem_info.blocks[i].size) {
            insert_block(i);

            mem_info.blocks[i].size = size;
            mem_info.blocks[i].free = 0;
            mem_info.blocks[i + 1].size -= size;

            return (void *)r;
        }

        r += mem_info.blocks[i].size;
    }

    return NULL;
}


void
LRMI_free_real(void *m)
{
    int i;
    char *r = (char *)REAL_MEM_BASE;

    if (!mem_info.ready)
        return;

    i = 0;
    while (m != (void *)r) {
        r += mem_info.blocks[i].size;
        i++;
        if (i == mem_info.count)
            return;
    }

    mem_info.blocks[i].free = 1;

    if (i + 1 < mem_info.count && mem_info.blocks[i + 1].free) {
        mem_info.blocks[i].size += mem_info.blocks[i + 1].size;
        delete_block(i + 1);
    }

    if (i - 1 >= 0 && mem_info.blocks[i - 1].free) {
        mem_info.blocks[i - 1].size += mem_info.blocks[i].size;
        delete_block(i);
    }
}
#if defined(__linux__)
#define DEFAULT_VM86_FLAGS	(IF_MASK | IOPL_MASK)
#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#define DEFAULT_VM86_FLAGS	(PSL_I | PSL_IOPL)
#define TF_MASK			PSL_T
#define VIF_MASK		PSL_VIF
#endif
#define DEFAULT_STACK_SIZE	0x1000
#define RETURN_TO_32_INT	255

#if defined(__linux__)
#define CONTEXT_REGS	context.vm.regs
#define REG(x)		x
#elif defined(__NetBSD__) || defined(__OpenBSD__)
#define CONTEXT_REGS	context.vm.substr.regs
#define REG(x)		vmsc.sc_ ## x
#elif defined(__FreeBSD__)
#define CONTEXT_REGS	context.vm.uc
#define REG(x)		uc_mcontext.mc_ ## x
#endif

static struct {
    int ready;

    unsigned short ret_seg, ret_off;
    unsigned short stack_seg, stack_off;

#if defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__)
    struct vm86_struct vm;
#elif defined(__FreeBSD__)
    struct {
        struct vm86_init_args init;
        ucontext_t uc;
    } vm;
#endif

#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
    int success;
    jmp_buf env;
    void *old_sighandler;
    int vret;
#endif
} context = { 0 };
static inline void
set_bit(unsigned int bit, void *array)
{
    unsigned char *a = array;

    a[bit / 8] |= (1 << (bit % 8));
}


static inline unsigned int
get_int_seg(int i)
{
    return *(unsigned short *)(i * 4 + 2);
}


static inline unsigned int
get_int_off(int i)
{
    return *(unsigned short *)(i * 4);
}


static inline void
pushw(unsigned short i)
{
    CONTEXT_REGS.REG(esp) -= 2;
    *(unsigned short *)(((unsigned int)CONTEXT_REGS.REG(ss) << 4) +
        CONTEXT_REGS.REG(esp)) = i;
}
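
/*
 * Set up everything a vm86 call needs: the conventional-memory heap, the
 * IVT/BDA and video/ROM mappings from /dev/mem, a real-mode stack, and the
 * two-byte "int RETURN_TO_32_INT" stub used to get back to 32-bit mode.
 */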
int
LRMI_init(void)
{
    void *m;

    if (context.ready)
        return 1;

    if (!real_mem_init())
        return 0;

    /*
     Map the Interrupt Vectors (0x0 - 0x400) + BIOS data (0x400 - 0x502)
     and the ROM (0xa0000 - 0x100000)
    */
    /*
     * v86d: map the IVT/BDA area as shared, see note in v86_mem.c for
     * an explanation
     */
    if (!map_file((void *)0, 0x502,
        PROT_READ | PROT_WRITE | PROT_EXEC,
        MAP_FIXED | MAP_SHARED, "/dev/mem", 0)) {
        real_mem_deinit();
        return 0;
    }

    if (!map_file((void *)0xa0000, 0x20000,
        PROT_READ | PROT_WRITE,
        MAP_FIXED | MAP_SHARED, "/dev/mem", 0xa0000)) {
        munmap((void *)0, 0x502);
        real_mem_deinit();
        return 0;
    }

    if (!map_file((void *)0xc0000, 0x30000,
        PROT_READ | PROT_WRITE | PROT_EXEC,
        MAP_FIXED | MAP_SHARED, "/dev/mem", 0xc0000)) {
        munmap((void *)0, 0x502);
        munmap((void *)0xa0000, 0x20000);
        real_mem_deinit();
        return 0;
    }

    /*
     Allocate a stack
    */
    m = LRMI_alloc_real(DEFAULT_STACK_SIZE);

    context.stack_seg = (unsigned int)m >> 4;
    context.stack_off = DEFAULT_STACK_SIZE;

    /*
     Allocate the return to 32 bit routine
    */
    m = LRMI_alloc_real(2);

    context.ret_seg = (unsigned int)m >> 4;
    context.ret_off = (unsigned int)m & 0xf;

    ((unsigned char *)m)[0] = 0xcd;  /* int opcode */
    ((unsigned char *)m)[1] = RETURN_TO_32_INT;

    memset(&context.vm, 0, sizeof(context.vm));

    /*
     Enable kernel emulation of all ints except RETURN_TO_32_INT
    */
#if defined(__linux__)
    memset(&context.vm.int_revectored, 0, sizeof(context.vm.int_revectored));
    set_bit(RETURN_TO_32_INT, &context.vm.int_revectored);
#elif defined(__NetBSD__) || defined(__OpenBSD__)
    set_bit(RETURN_TO_32_INT, &context.vm.int_byuser);
#elif defined(__FreeBSD__)
    set_bit(RETURN_TO_32_INT, &context.vm.init.int_map);
#endif

    context.ready = 1;

    return 1;
}
static void
set_regs(struct LRMI_regs *r)
{
    CONTEXT_REGS.REG(edi) = r->edi;
    CONTEXT_REGS.REG(esi) = r->esi;
    CONTEXT_REGS.REG(ebp) = r->ebp;
    CONTEXT_REGS.REG(ebx) = r->ebx;
    CONTEXT_REGS.REG(edx) = r->edx;
    CONTEXT_REGS.REG(ecx) = r->ecx;
    CONTEXT_REGS.REG(eax) = r->eax;
    CONTEXT_REGS.REG(eflags) = DEFAULT_VM86_FLAGS;
    CONTEXT_REGS.REG(es) = r->es;
    CONTEXT_REGS.REG(ds) = r->ds;
    CONTEXT_REGS.REG(fs) = r->fs;
    CONTEXT_REGS.REG(gs) = r->gs;
}


static void
get_regs(struct LRMI_regs *r)
{
    r->edi = CONTEXT_REGS.REG(edi);
    r->esi = CONTEXT_REGS.REG(esi);
    r->ebp = CONTEXT_REGS.REG(ebp);
    r->ebx = CONTEXT_REGS.REG(ebx);
    r->edx = CONTEXT_REGS.REG(edx);
    r->ecx = CONTEXT_REGS.REG(ecx);
    r->eax = CONTEXT_REGS.REG(eax);
    r->flags = CONTEXT_REGS.REG(eflags);
    r->es = CONTEXT_REGS.REG(es);
    r->ds = CONTEXT_REGS.REG(ds);
    r->fs = CONTEXT_REGS.REG(fs);
    r->gs = CONTEXT_REGS.REG(gs);
}
#define DIRECTION_FLAG	(1 << 10)

enum {
    CSEG = 0x2e, SSEG = 0x36, DSEG = 0x3e,
    ESEG = 0x26, FSEG = 0x64, GSEG = 0x65,
};
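
/*
 * The em_* helpers perform, on the host, the port I/O instructions that
 * the vm86 monitor would not handle, operating directly on the saved vm86
 * register image (CONTEXT_REGS).
 */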
static void
em_ins(int size)
{
    unsigned int edx, edi;

    edx = CONTEXT_REGS.REG(edx) & 0xffff;
    edi = CONTEXT_REGS.REG(edi) & 0xffff;
    edi += (unsigned int)CONTEXT_REGS.REG(es) << 4;

    if (CONTEXT_REGS.REG(eflags) & DIRECTION_FLAG) {
        if (size == 4)
            asm volatile ("std; insl; cld"
                : "=D" (edi) : "d" (edx), "0" (edi));
        else if (size == 2)
            asm volatile ("std; insw; cld"
                : "=D" (edi) : "d" (edx), "0" (edi));
        else
            asm volatile ("std; insb; cld"
                : "=D" (edi) : "d" (edx), "0" (edi));
    } else {
        if (size == 4)
            asm volatile ("cld; insl"
                : "=D" (edi) : "d" (edx), "0" (edi));
        else if (size == 2)
            asm volatile ("cld; insw"
                : "=D" (edi) : "d" (edx), "0" (edi));
        else
            asm volatile ("cld; insb"
                : "=D" (edi) : "d" (edx), "0" (edi));
    }

    edi -= (unsigned int)CONTEXT_REGS.REG(es) << 4;

    CONTEXT_REGS.REG(edi) &= 0xffff0000;
    CONTEXT_REGS.REG(edi) |= edi & 0xffff;
}

static void
em_rep_ins(int size)
{
    unsigned int cx;

    cx = CONTEXT_REGS.REG(ecx) & 0xffff;

    while (cx--)
        em_ins(size);

    CONTEXT_REGS.REG(ecx) &= 0xffff0000;
}
static void
em_outs(int size, int seg)
{
    unsigned int edx, esi, base;

    edx = CONTEXT_REGS.REG(edx) & 0xffff;
    esi = CONTEXT_REGS.REG(esi) & 0xffff;

    switch (seg) {
    case CSEG: base = CONTEXT_REGS.REG(cs); break;
    case SSEG: base = CONTEXT_REGS.REG(ss); break;
    case ESEG: base = CONTEXT_REGS.REG(es); break;
    case FSEG: base = CONTEXT_REGS.REG(fs); break;
    case GSEG: base = CONTEXT_REGS.REG(gs); break;
    default:
    case DSEG: base = CONTEXT_REGS.REG(ds); break;
    }

    esi += base << 4;

    if (CONTEXT_REGS.REG(eflags) & DIRECTION_FLAG) {
        if (size == 4)
            asm volatile ("std; outsl; cld"
                : "=S" (esi) : "d" (edx), "0" (esi));
        else if (size == 2)
            asm volatile ("std; outsw; cld"
                : "=S" (esi) : "d" (edx), "0" (esi));
        else
            asm volatile ("std; outsb; cld"
                : "=S" (esi) : "d" (edx), "0" (esi));
    } else {
        if (size == 4)
            asm volatile ("cld; outsl"
                : "=S" (esi) : "d" (edx), "0" (esi));
        else if (size == 2)
            asm volatile ("cld; outsw"
                : "=S" (esi) : "d" (edx), "0" (esi));
        else
            asm volatile ("cld; outsb"
                : "=S" (esi) : "d" (edx), "0" (esi));
    }

    esi -= base << 4;

    CONTEXT_REGS.REG(esi) &= 0xffff0000;
    CONTEXT_REGS.REG(esi) |= esi & 0xffff;
}

static void
em_rep_outs(int size, int seg)
{
    unsigned int cx;

    cx = CONTEXT_REGS.REG(ecx) & 0xffff;

    while (cx--)
        em_outs(size, seg);

    CONTEXT_REGS.REG(ecx) &= 0xffff0000;
}
static void
em_inbl(unsigned char literal)
{
    asm volatile ("inb %w1, %b0"
        : "=a" (CONTEXT_REGS.REG(eax))
        : "d" (literal), "0" (CONTEXT_REGS.REG(eax)));
}

static void
em_inb(void)
{
    asm volatile ("inb %w1, %b0"
        : "=a" (CONTEXT_REGS.REG(eax))
        : "d" (CONTEXT_REGS.REG(edx)), "0" (CONTEXT_REGS.REG(eax)));
}

static void
em_inw(void)
{
    asm volatile ("inw %w1, %w0"
        : "=a" (CONTEXT_REGS.REG(eax))
        : "d" (CONTEXT_REGS.REG(edx)), "0" (CONTEXT_REGS.REG(eax)));
}

static void
em_inl(void)
{
    asm volatile ("inl %w1, %0"
        : "=a" (CONTEXT_REGS.REG(eax))
        : "d" (CONTEXT_REGS.REG(edx)));
}

static void
em_outbl(unsigned char literal)
{
    asm volatile ("outb %b0, %w1"
        : : "a" (CONTEXT_REGS.REG(eax)),
        "d" (literal));
}

static void
em_outb(void)
{
    asm volatile ("outb %b0, %w1"
        : : "a" (CONTEXT_REGS.REG(eax)),
        "d" (CONTEXT_REGS.REG(edx)));
}

static void
em_outw(void)
{
    asm volatile ("outw %w0, %w1"
        : : "a" (CONTEXT_REGS.REG(eax)),
        "d" (CONTEXT_REGS.REG(edx)));
}

static void
em_outl(void)
{
    asm volatile ("outl %0, %w1"
        : : "a" (CONTEXT_REGS.REG(eax)),
        "d" (CONTEXT_REGS.REG(edx)));
}
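
/*
 * Decode the instruction at CS:IP that caused a VM86_UNKNOWN fault.  Only
 * the in/out and string I/O opcodes (plus their prefixes) are handled;
 * anything else makes emulate() return 0.  On success EIP is advanced past
 * the emulated instruction.
 */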
static int
emulate(void)
{
    unsigned char *insn;
    struct {
        unsigned char seg;
        unsigned int size : 1;
        unsigned int rep : 1;
    } prefix = { DSEG, 0, 0 };
    int i = 0;

    insn = (unsigned char *)((unsigned int)CONTEXT_REGS.REG(cs) << 4);
    insn += CONTEXT_REGS.REG(eip);

    while (1) {
        if (insn[i] == 0x66) {
            prefix.size = 1 - prefix.size;
            i++;
        } else if (insn[i] == 0xf3) {
            prefix.rep = 1;
            i++;
        } else if (insn[i] == CSEG || insn[i] == SSEG
            || insn[i] == DSEG || insn[i] == ESEG
            || insn[i] == FSEG || insn[i] == GSEG) {
            prefix.seg = insn[i];
            i++;
        } else if (insn[i] == 0xf0 || insn[i] == 0xf2
            || insn[i] == 0x67) {
            /* these prefixes are just ignored */
            i++;
        } else if (insn[i] == 0x6c) {
            if (prefix.rep)
                em_rep_ins(1);
            else
                em_ins(1);
            i++;
            break;
        } else if (insn[i] == 0x6d) {
            if (prefix.rep) {
                if (prefix.size)
                    em_rep_ins(4);
                else
                    em_rep_ins(2);
            } else {
                if (prefix.size)
                    em_ins(4);
                else
                    em_ins(2);
            }
            i++;
            break;
        } else if (insn[i] == 0x6e) {
            if (prefix.rep)
                em_rep_outs(1, prefix.seg);
            else
                em_outs(1, prefix.seg);
            i++;
            break;
        } else if (insn[i] == 0x6f) {
            if (prefix.rep) {
                if (prefix.size)
                    em_rep_outs(4, prefix.seg);
                else
                    em_rep_outs(2, prefix.seg);
            } else {
                if (prefix.size)
                    em_outs(4, prefix.seg);
                else
                    em_outs(2, prefix.seg);
            }
            i++;
            break;
        } else if (insn[i] == 0xe4) {
            em_inbl(insn[i + 1]);
            i += 2;
            break;
        } else if (insn[i] == 0xec) {
            em_inb();
            i++;
            break;
        } else if (insn[i] == 0xed) {
            if (prefix.size)
                em_inl();
            else
                em_inw();
            i++;
            break;
        } else if (insn[i] == 0xe6) {
            em_outbl(insn[i + 1]);
            i += 2;
            break;
        } else if (insn[i] == 0xee) {
            em_outb();
            i++;
            break;
        } else if (insn[i] == 0xef) {
            if (prefix.size)
                em_outl();
            else
                em_outw();
            i++;
            break;
        } else
            return 0;
    }

    CONTEXT_REGS.REG(eip) += i;

    return 1;
}
#if defined(__linux__)
/*
 I don't know how to make sure I get the right vm86() from libc.
 The one I want is syscall # 113 (vm86old() in libc 5, vm86() in glibc)
 which should be declared as "int vm86(struct vm86_struct *);" in
 <sys/vm86.h>.

 This just does syscall 113 with inline asm, which should work
 for both libcs (I hope).
*/
#if !defined(USE_LIBC_VM86)
static int
lrmi_vm86(struct vm86_struct *vm)
{
    int r;
#ifdef __PIC__
    asm volatile (
        "pushl %%ebx\n\t"
        "movl %2, %%ebx\n\t"
        "int $0x80\n\t"
        "popl %%ebx"
        : "=a" (r)
        : "0" (113), "r" (vm));
#else
    asm volatile (
        "int $0x80"
        : "=a" (r)
        : "0" (113), "b" (vm));
#endif
    return r;
}
#else
#define lrmi_vm86 vm86
#endif
#endif /* __linux__ */
static void
debug_info(int vret)
{
#ifdef LRMI_DEBUG
    int i;
    unsigned char *p;

    fputs("vm86() failed\n", stderr);
    fprintf(stderr, "return = 0x%x\n", vret);
    fprintf(stderr, "eax = 0x%08x\n", CONTEXT_REGS.REG(eax));
    fprintf(stderr, "ebx = 0x%08x\n", CONTEXT_REGS.REG(ebx));
    fprintf(stderr, "ecx = 0x%08x\n", CONTEXT_REGS.REG(ecx));
    fprintf(stderr, "edx = 0x%08x\n", CONTEXT_REGS.REG(edx));
    fprintf(stderr, "esi = 0x%08x\n", CONTEXT_REGS.REG(esi));
    fprintf(stderr, "edi = 0x%08x\n", CONTEXT_REGS.REG(edi));
    fprintf(stderr, "ebp = 0x%08x\n", CONTEXT_REGS.REG(ebp));
    fprintf(stderr, "eip = 0x%08x\n", CONTEXT_REGS.REG(eip));
    fprintf(stderr, "cs  = 0x%04x\n", CONTEXT_REGS.REG(cs));
    fprintf(stderr, "esp = 0x%08x\n", CONTEXT_REGS.REG(esp));
    fprintf(stderr, "ss  = 0x%04x\n", CONTEXT_REGS.REG(ss));
    fprintf(stderr, "ds  = 0x%04x\n", CONTEXT_REGS.REG(ds));
    fprintf(stderr, "es  = 0x%04x\n", CONTEXT_REGS.REG(es));
    fprintf(stderr, "fs  = 0x%04x\n", CONTEXT_REGS.REG(fs));
    fprintf(stderr, "gs  = 0x%04x\n", CONTEXT_REGS.REG(gs));
    fprintf(stderr, "eflags = 0x%08x\n", CONTEXT_REGS.REG(eflags));

    fputs("cs:ip = [ ", stderr);

    p = (unsigned char *)((CONTEXT_REGS.REG(cs) << 4) + (CONTEXT_REGS.REG(eip) & 0xffff));

    for (i = 0; i < 16; ++i)
        fprintf(stderr, "%02x ", (unsigned int)p[i]);

    fputs("]\n", stderr);
#endif
}
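
/*
 * run_vm86() executes the prepared vm86 context until the
 * RETURN_TO_32_INT stub fires.  The Linux version below loops around the
 * vm86 syscall, reflecting software interrupts through the real-mode IVT
 * and falling back to emulate() for unhandled port I/O; the BSD versions
 * further down do the same work from a signal callback.
 */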
#if defined(__linux__)
static int
run_vm86(void)
{
    unsigned int vret;
    sigset_t all_sigs, old_sigs;
    unsigned long old_gs, old_fs;

    while (1) {
        /* FIXME: maybe this should be applied to the BSD variants too? */
        sigfillset(&all_sigs);
        sigprocmask(SIG_SETMASK, &all_sigs, &old_sigs);
        asm volatile ("mov %%gs, %0" : "=rm" (old_gs));
        asm volatile ("mov %%fs, %0" : "=rm" (old_fs));
        vret = lrmi_vm86(&context.vm);
        asm volatile ("mov %0, %%gs" :: "rm" (old_gs));
        asm volatile ("mov %0, %%fs" :: "rm" (old_fs));
        sigprocmask(SIG_SETMASK, &old_sigs, NULL);

        if (VM86_TYPE(vret) == VM86_INTx) {
            unsigned int v = VM86_ARG(vret);

            if (v == RETURN_TO_32_INT)
                return 1;

            pushw(CONTEXT_REGS.REG(eflags));
            pushw(CONTEXT_REGS.REG(cs));
            pushw(CONTEXT_REGS.REG(eip));

            CONTEXT_REGS.REG(cs) = get_int_seg(v);
            CONTEXT_REGS.REG(eip) = get_int_off(v);
            CONTEXT_REGS.REG(eflags) &= ~(VIF_MASK | TF_MASK);

            continue;
        }

        if (VM86_TYPE(vret) != VM86_UNKNOWN)
            break;

        if (!emulate())
            break;
    }

    debug_info(vret);

    return 0;
}
#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(__OpenBSD__)
#if defined(__NetBSD__) || defined(__OpenBSD__)
static void
vm86_callback(int sig, int code, struct sigcontext *sc)
{
    /* Sync our context with what the kernel delivered to us. */
    memcpy(&CONTEXT_REGS, sc, sizeof(*sc));

    switch (VM86_TYPE(code)) {
    case VM86_INTx:
        {
        unsigned int v = VM86_ARG(code);

        if (v == RETURN_TO_32_INT) {
            context.success = 1;
            longjmp(context.env, 1);
        }

        pushw(CONTEXT_REGS.REG(eflags));
        pushw(CONTEXT_REGS.REG(cs));
        pushw(CONTEXT_REGS.REG(eip));

        CONTEXT_REGS.REG(cs) = get_int_seg(v);
        CONTEXT_REGS.REG(eip) = get_int_off(v);
        CONTEXT_REGS.REG(eflags) &= ~(VIF_MASK | TF_MASK);

        break;
        }

    case VM86_UNKNOWN:
        if (emulate() == 0) {
            context.success = 0;
            context.vret = code;
            longjmp(context.env, 1);
        }
        break;

    default:
        context.success = 0;
        context.vret = code;
        longjmp(context.env, 1);
        return;
    }

    /* ...and sync our context back to the kernel. */
    memcpy(sc, &CONTEXT_REGS, sizeof(*sc));
}
#elif defined(__FreeBSD__)
static void
vm86_callback(int sig, int code, struct sigcontext *sc)
{
    unsigned char *addr;

    /* Sync our context with what the kernel delivered to us. */
    memcpy(&CONTEXT_REGS, sc, sizeof(*sc));

    if (code) {
        /* XXX probably need to call original signal handler here */
        context.success = 0;
        context.vret = code;
        longjmp(context.env, 1);
    }

    addr = (unsigned char *)((CONTEXT_REGS.REG(cs) << 4) +
        CONTEXT_REGS.REG(eip));

    if (addr[0] == 0xcd) {  /* int opcode */
        if (addr[1] == RETURN_TO_32_INT) {
            context.success = 1;
            longjmp(context.env, 1);
        }

        pushw(CONTEXT_REGS.REG(eflags));
        pushw(CONTEXT_REGS.REG(cs));
        pushw(CONTEXT_REGS.REG(eip));

        CONTEXT_REGS.REG(cs) = get_int_seg(addr[1]);
        CONTEXT_REGS.REG(eip) = get_int_off(addr[1]);
        CONTEXT_REGS.REG(eflags) &= ~(VIF_MASK | TF_MASK);
    } else {
        if (emulate() == 0) {
            context.success = 0;
            longjmp(context.env, 1);
        }
    }

    /* ...and sync our context back to the kernel. */
    memcpy(sc, &CONTEXT_REGS, sizeof(*sc));
}
#endif /* __FreeBSD__ */
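
/*
 * On the BSDs the kernel reports vm86 events through a signal (SIGURG on
 * NetBSD/OpenBSD, SIGBUS on FreeBSD), so run_vm86() installs
 * vm86_callback(), enters vm86 mode with i386_vm86(), and returns once the
 * callback longjmp()s back here with the result.
 */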
static int
run_vm86(void)
{
    if (context.old_sighandler) {
#ifdef LRMI_DEBUG
        fprintf(stderr, "run_vm86: callback already installed\n");
#endif
        return (0);
    }

#if defined(__NetBSD__) || defined(__OpenBSD__)
    context.old_sighandler = signal(SIGURG, (void (*)(int))vm86_callback);
#elif defined(__FreeBSD__)
    context.old_sighandler = signal(SIGBUS, (void (*)(int))vm86_callback);
#endif

    if (context.old_sighandler == (void *)-1) {
        context.old_sighandler = NULL;
#ifdef LRMI_DEBUG
        fprintf(stderr, "run_vm86: cannot install callback\n");
#endif
        return (0);
    }

    if (setjmp(context.env)) {
#if defined(__NetBSD__) || defined(__OpenBSD__)
        (void) signal(SIGURG, context.old_sighandler);
#elif defined(__FreeBSD__)
        (void) signal(SIGBUS, context.old_sighandler);
#endif
        context.old_sighandler = NULL;

        if (context.success)
            return (1);
        debug_info(context.vret);
        return (0);
    }

#if defined(__NetBSD__) || defined(__OpenBSD__)
    if (i386_vm86(&context.vm) == -1)
        return (0);
#elif defined(__FreeBSD__)
    if (i386_vm86(VM86_INIT, &context.vm.init))
        return 0;

    CONTEXT_REGS.REG(eflags) |= PSL_VM | PSL_VIF;
    sigreturn(&context.vm.uc);
#endif /* __FreeBSD__ */

    /* NOTREACHED */
    return (0);
}
#endif /* __NetBSD__ || __FreeBSD__ || __OpenBSD__ */
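
/*
 * Both public entry points below load CONTEXT_REGS from the caller's
 * LRMI_regs, fall back to the internal real-mode stack when no SS:SP is
 * supplied, and push the far address of the RETURN_TO_32_INT stub so that
 * the real-mode code's final return lands back in 32-bit mode.
 */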
int
LRMI_call(struct LRMI_regs *r)
{
    unsigned int vret;

    memset(&CONTEXT_REGS, 0, sizeof(CONTEXT_REGS));

    set_regs(r);

    CONTEXT_REGS.REG(cs) = r->cs;
    CONTEXT_REGS.REG(eip) = r->ip;

    if (r->ss == 0 && r->sp == 0) {
        CONTEXT_REGS.REG(ss) = context.stack_seg;
        CONTEXT_REGS.REG(esp) = context.stack_off;
    } else {
        CONTEXT_REGS.REG(ss) = r->ss;
        CONTEXT_REGS.REG(esp) = r->sp;
    }

    pushw(context.ret_seg);
    pushw(context.ret_off);

    vret = run_vm86();

    get_regs(r);

    return vret;
}


int
LRMI_int(int i, struct LRMI_regs *r)
{
    unsigned int vret;
    unsigned int seg, off;

    seg = get_int_seg(i);
    off = get_int_off(i);

    /*
     If the interrupt is in regular memory, it's probably
     still pointing at a DOS TSR (which is now gone).
    */
    if (seg < 0xa000 || (seg << 4) + off >= 0x100000) {
#ifdef LRMI_DEBUG
        fprintf(stderr, "Int 0x%x is not in rom (%04x:%04x)\n", i, seg, off);
#endif
        return 0;
    }

    memset(&CONTEXT_REGS, 0, sizeof(CONTEXT_REGS));

    set_regs(r);

    CONTEXT_REGS.REG(cs) = seg;
    CONTEXT_REGS.REG(eip) = off;

    if (r->ss == 0 && r->sp == 0) {
        CONTEXT_REGS.REG(ss) = context.stack_seg;
        CONTEXT_REGS.REG(esp) = context.stack_off;
    } else {
        CONTEXT_REGS.REG(ss) = r->ss;
        CONTEXT_REGS.REG(esp) = r->sp;
    }

    pushw(DEFAULT_VM86_FLAGS);
    pushw(context.ret_seg);
    pushw(context.ret_off);

    vret = run_vm86();

    get_regs(r);

    return vret;
}

#else /* (__linux__ || __NetBSD__ || __FreeBSD__ || __OpenBSD__) && __i386__ */
#warning "LRMI is not supported on your system!"
#endif