/*
Linux Real Mode Interface - A library of DPMI-like functions for Linux.

Copyright (C) 1998 by Josh Vanderhoof

You are free to distribute and modify this file, as long as you
do not remove this copyright notice and clearly label modified
versions as being modified.

This software has NO WARRANTY.  Use it at your own risk.

Original location: http://cvs.debian.org/lrmi/
Modified for use with MPlayer, see the CVS log for details.
*/

#include "../config.h"
#ifdef HAVE_VESA

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/io.h>
#include <asm/vm86.h>

#ifdef USE_LIBC_VM86
#include <sys/vm86.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>

#include "lrmi.h"

#define REAL_MEM_BASE   ((void *)0x10000)
#define REAL_MEM_SIZE   0x10000
#define REAL_MEM_BLOCKS 0x100

struct mem_block
    {
    unsigned int size : 20;
    unsigned int free : 1;
    };

static struct
    {
    int ready;
    int count;
    struct mem_block blocks[REAL_MEM_BLOCKS];
    } mem_info = { .ready = 0, };

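/*
 Map a 64k block of conventional memory at REAL_MEM_BASE from /dev/zero
 and set up the block list that LRMI_alloc_real()/LRMI_free_real() manage.
*/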
static int
real_mem_init(void)
    {
    void *m;
    int fd_zero;

    if (mem_info.ready)
        return 1;

    fd_zero = open("/dev/zero", O_RDONLY);
    if (fd_zero == -1)
        {
        perror("open /dev/zero");
        return 0;
        }

    m = mmap((void *)REAL_MEM_BASE, REAL_MEM_SIZE,
     PROT_READ | PROT_WRITE | PROT_EXEC,
     MAP_FIXED | MAP_PRIVATE, fd_zero, 0);

    if (m == (void *)-1)
        {
        perror("mmap /dev/zero");
        close(fd_zero);
        return 0;
        }

    mem_info.ready = 1;
    mem_info.count = 1;
    mem_info.blocks[0].size = REAL_MEM_SIZE;
    mem_info.blocks[0].free = 1;

    return 1;
    }

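/*
 Block list helpers: open or close a slot at index i, shifting the
 following entries up or down.
*/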
static void
insert_block(int i)
    {
    memmove(
     mem_info.blocks + i + 1,
     mem_info.blocks + i,
     (mem_info.count - i) * sizeof(struct mem_block));

    mem_info.count++;
    }

static void
delete_block(int i)
    {
    mem_info.count--;

    memmove(
     mem_info.blocks + i,
     mem_info.blocks + i + 1,
     (mem_info.count - i) * sizeof(struct mem_block));
    }

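/*
 First-fit allocator over the real-mode arena.  Sizes are rounded up to
 a multiple of 16 bytes, so every block starts on a paragraph boundary.
*/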
void *
LRMI_alloc_real(int size)
    {
    int i;
    char *r = (char *)REAL_MEM_BASE;

    if (!mem_info.ready)
        return NULL;

    if (mem_info.count == REAL_MEM_BLOCKS)
        return NULL;

    size = (size + 15) & ~15;

    for (i = 0; i < mem_info.count; i++)
        {
        if (mem_info.blocks[i].free && size < mem_info.blocks[i].size)
            {
            insert_block(i);

            mem_info.blocks[i].size = size;
            mem_info.blocks[i].free = 0;
            mem_info.blocks[i + 1].size -= size;

            return (void *)r;
            }

        r += mem_info.blocks[i].size;
        }

    return NULL;
    }

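/*
 Free a block returned by LRMI_alloc_real() and merge it with free
 neighbours to limit fragmentation.
*/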
void
LRMI_free_real(void *m)
    {
    int i;
    char *r = (char *)REAL_MEM_BASE;

    if (!mem_info.ready)
        return;

    i = 0;
    while (m != (void *)r)
        {
        r += mem_info.blocks[i].size;
        i++;
        if (i == mem_info.count)
            return;
        }

    mem_info.blocks[i].free = 1;

    if (i + 1 < mem_info.count && mem_info.blocks[i + 1].free)
        {
        mem_info.blocks[i].size += mem_info.blocks[i + 1].size;
        delete_block(i + 1);
        }

    if (i - 1 >= 0 && mem_info.blocks[i - 1].free)
        {
        mem_info.blocks[i - 1].size += mem_info.blocks[i].size;
        delete_block(i);
        }
    }

#define DEFAULT_VM86_FLAGS (IF_MASK | IOPL_MASK)
#define DEFAULT_STACK_SIZE 0x1000
#define RETURN_TO_32_INT   255

static struct
    {
    int ready;
    unsigned short ret_seg, ret_off;
    unsigned short stack_seg, stack_off;
    struct vm86_struct vm;
    } context = { .ready = 0, };

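/*
 Small helpers: set_bit() marks an interrupt in the vm86 revectored
 bitmap, get_int_seg()/get_int_off() read a vector from the real-mode
 IVT at linear address 0, and pushw() pushes a word on the vm86 stack.
*/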
static inline void
set_bit(unsigned int bit, void *array)
    {
    unsigned char *a = array;

    a[bit / 8] |= (1 << (bit % 8));
    }

static inline unsigned int
get_int_seg(int i)
    {
    return *(unsigned short *)(i * 4 + 2);
    }

static inline unsigned int
get_int_off(int i)
    {
    return *(unsigned short *)(i * 4);
    }

static inline void
pushw(unsigned short i)
    {
    struct vm86_regs *r = &context.vm.regs;
    r->esp -= 2;
    *(unsigned short *)(((unsigned int)r->ss << 4) + r->esp) = i;
    }

int
LRMI_init(void)
    {
    void *m;
    int fd_mem;

    if (context.ready)
        return 1;

    if (!real_mem_init())
        return 0;

    /*
     Map the Interrupt Vectors (0x0 - 0x400) + BIOS data (0x400 - 0x502)
     and the ROM (0xa0000 - 0x100000)
    */
    fd_mem = open("/dev/mem", O_RDWR);

    if (fd_mem == -1)
        {
        perror("open /dev/mem");
        return 0;
        }

    m = mmap((void *)0, 0x502,
     PROT_READ | PROT_WRITE | PROT_EXEC,
     MAP_FIXED | MAP_PRIVATE, fd_mem, 0);

    if (m == (void *)-1)
        {
        perror("mmap /dev/mem");
        return 0;
        }

    m = mmap((void *)0xa0000, 0x100000 - 0xa0000,
     PROT_READ | PROT_WRITE,
     MAP_FIXED | MAP_SHARED, fd_mem, 0xa0000);

    if (m == (void *)-1)
        {
        perror("mmap /dev/mem");
        return 0;
        }

    /*
     Allocate a stack
    */
    m = LRMI_alloc_real(DEFAULT_STACK_SIZE);

    context.stack_seg = (unsigned int)m >> 4;
    context.stack_off = DEFAULT_STACK_SIZE;

    /*
     Allocate the return to 32 bit routine
    */
    m = LRMI_alloc_real(2);

    context.ret_seg = (unsigned int)m >> 4;
    context.ret_off = (unsigned int)m & 0xf;

    ((unsigned char *)m)[0] = 0xcd;  /* int opcode */
    ((unsigned char *)m)[1] = RETURN_TO_32_INT;

    memset(&context.vm, 0, sizeof(context.vm));

    /*
     Enable kernel emulation of all ints except RETURN_TO_32_INT
    */
    memset(&context.vm.int_revectored, 0, sizeof(context.vm.int_revectored));
    set_bit(RETURN_TO_32_INT, &context.vm.int_revectored);

    context.ready = 1;

    return 1;
    }

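/*
 Copy register values between the caller's struct LRMI_regs and the
 kernel's struct vm86_regs.
*/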
static void
set_regs(struct LRMI_regs *r)
    {
    context.vm.regs.edi = r->edi;
    context.vm.regs.esi = r->esi;
    context.vm.regs.ebp = r->ebp;
    context.vm.regs.ebx = r->ebx;
    context.vm.regs.edx = r->edx;
    context.vm.regs.ecx = r->ecx;
    context.vm.regs.eax = r->eax;
    context.vm.regs.eflags = DEFAULT_VM86_FLAGS;
    context.vm.regs.es = r->es;
    context.vm.regs.ds = r->ds;
    context.vm.regs.fs = r->fs;
    context.vm.regs.gs = r->gs;
    }

static void
get_regs(struct LRMI_regs *r)
    {
    r->edi = context.vm.regs.edi;
    r->esi = context.vm.regs.esi;
    r->ebp = context.vm.regs.ebp;
    r->ebx = context.vm.regs.ebx;
    r->edx = context.vm.regs.edx;
    r->ecx = context.vm.regs.ecx;
    r->eax = context.vm.regs.eax;
    r->flags = context.vm.regs.eflags;
    r->es = context.vm.regs.es;
    r->ds = context.vm.regs.ds;
    r->fs = context.vm.regs.fs;
    r->gs = context.vm.regs.gs;
    }

#define DIRECTION_FLAG (1 << 10)

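/*
 The following em_* routines emulate port I/O instructions that fault
 inside vm86 mode.  The string variants honour the direction flag and
 translate the 16-bit di/si pointer through the ds segment base.
*/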
static void
em_ins(int size)
    {
    unsigned int edx, edi;

    edx = context.vm.regs.edx & 0xffff;
    edi = context.vm.regs.edi & 0xffff;
    edi += (unsigned int)context.vm.regs.ds << 4;

    if (context.vm.regs.eflags & DIRECTION_FLAG)
        {
        if (size == 4)
            asm volatile ("std; insl; cld"
             : "=D" (edi) : "d" (edx), "0" (edi));
        else if (size == 2)
            asm volatile ("std; insw; cld"
             : "=D" (edi) : "d" (edx), "0" (edi));
        else
            asm volatile ("std; insb; cld"
             : "=D" (edi) : "d" (edx), "0" (edi));
        }
    else
        {
        if (size == 4)
            asm volatile ("cld; insl"
             : "=D" (edi) : "d" (edx), "0" (edi));
        else if (size == 2)
            asm volatile ("cld; insw"
             : "=D" (edi) : "d" (edx), "0" (edi));
        else
            asm volatile ("cld; insb"
             : "=D" (edi) : "d" (edx), "0" (edi));
        }

    edi -= (unsigned int)context.vm.regs.ds << 4;

    context.vm.regs.edi &= 0xffff0000;
    context.vm.regs.edi |= edi & 0xffff;
    }

static void
em_rep_ins(int size)
    {
    unsigned int ecx, edx, edi;

    ecx = context.vm.regs.ecx & 0xffff;
    edx = context.vm.regs.edx & 0xffff;
    edi = context.vm.regs.edi & 0xffff;
    edi += (unsigned int)context.vm.regs.ds << 4;

    if (context.vm.regs.eflags & DIRECTION_FLAG)
        {
        if (size == 4)
            asm volatile ("std; rep; insl; cld"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        else if (size == 2)
            asm volatile ("std; rep; insw; cld"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        else
            asm volatile ("std; rep; insb; cld"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        }
    else
        {
        if (size == 4)
            asm volatile ("cld; rep; insl"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        else if (size == 2)
            asm volatile ("cld; rep; insw"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        else
            asm volatile ("cld; rep; insb"
             : "=D" (edi), "=c" (ecx)
             : "d" (edx), "0" (edi), "1" (ecx));
        }

    edi -= (unsigned int)context.vm.regs.ds << 4;

    context.vm.regs.edi &= 0xffff0000;
    context.vm.regs.edi |= edi & 0xffff;

    context.vm.regs.ecx &= 0xffff0000;
    context.vm.regs.ecx |= ecx & 0xffff;
    }

static void
em_outs(int size)
    {
    unsigned int edx, esi;

    edx = context.vm.regs.edx & 0xffff;
    esi = context.vm.regs.esi & 0xffff;
    esi += (unsigned int)context.vm.regs.ds << 4;

    if (context.vm.regs.eflags & DIRECTION_FLAG)
        {
        if (size == 4)
            asm volatile ("std; outsl; cld"
             : "=S" (esi) : "d" (edx), "0" (esi));
        else if (size == 2)
            asm volatile ("std; outsw; cld"
             : "=S" (esi) : "d" (edx), "0" (esi));
        else
            asm volatile ("std; outsb; cld"
             : "=S" (esi) : "d" (edx), "0" (esi));
        }
    else
        {
        if (size == 4)
            asm volatile ("cld; outsl"
             : "=S" (esi) : "d" (edx), "0" (esi));
        else if (size == 2)
            asm volatile ("cld; outsw"
             : "=S" (esi) : "d" (edx), "0" (esi));
        else
            asm volatile ("cld; outsb"
             : "=S" (esi) : "d" (edx), "0" (esi));
        }

    esi -= (unsigned int)context.vm.regs.ds << 4;

    context.vm.regs.esi &= 0xffff0000;
    context.vm.regs.esi |= esi & 0xffff;
    }

static void
em_rep_outs(int size)
    {
    unsigned int ecx, edx, esi;

    ecx = context.vm.regs.ecx & 0xffff;
    edx = context.vm.regs.edx & 0xffff;
    esi = context.vm.regs.esi & 0xffff;
    esi += (unsigned int)context.vm.regs.ds << 4;

    if (context.vm.regs.eflags & DIRECTION_FLAG)
        {
        if (size == 4)
            asm volatile ("std; rep; outsl; cld"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        else if (size == 2)
            asm volatile ("std; rep; outsw; cld"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        else
            asm volatile ("std; rep; outsb; cld"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        }
    else
        {
        if (size == 4)
            asm volatile ("cld; rep; outsl"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        else if (size == 2)
            asm volatile ("cld; rep; outsw"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        else
            asm volatile ("cld; rep; outsb"
             : "=S" (esi), "=c" (ecx)
             : "d" (edx), "0" (esi), "1" (ecx));
        }

    esi -= (unsigned int)context.vm.regs.ds << 4;

    context.vm.regs.esi &= 0xffff0000;
    context.vm.regs.esi |= esi & 0xffff;

    context.vm.regs.ecx &= 0xffff0000;
    context.vm.regs.ecx |= ecx & 0xffff;
    }

static void
em_inbl(unsigned char literal)
    {
    context.vm.regs.eax = inb(literal) & 0xff;
    }

static void
em_inb(void)
    {
    asm volatile ("inb (%w1), %b0"
     : "=a" (context.vm.regs.eax)
     : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
    }

static void
em_inw(void)
    {
    asm volatile ("inw (%w1), %w0"
     : "=a" (context.vm.regs.eax)
     : "d" (context.vm.regs.edx), "0" (context.vm.regs.eax));
    }

static void
em_inl(void)
    {
    asm volatile ("inl (%w1), %0"
     : "=a" (context.vm.regs.eax)
     : "d" (context.vm.regs.edx));
    }

static void
em_outbl(unsigned char literal)
    {
    outb(context.vm.regs.eax & 0xff, literal);
    }

static void
em_outb(void)
    {
    asm volatile ("outb %b0, (%w1)"
     : : "a" (context.vm.regs.eax),
     "d" (context.vm.regs.edx));
    }

static void
em_outw(void)
    {
    asm volatile ("outw %w0, (%w1)"
     : : "a" (context.vm.regs.eax),
     "d" (context.vm.regs.edx));
    }

static void
em_outl(void)
    {
    asm volatile ("outl %0, (%w1)"
     : : "a" (context.vm.regs.eax),
     "d" (context.vm.regs.edx));
    }

static int
emulate(void)
    {
    unsigned char *insn;
    struct
        {
        unsigned int size : 1;
        unsigned int rep : 1;
        } prefix = { 0, 0 };
    int i = 0;

    insn = (unsigned char *)((unsigned int)context.vm.regs.cs << 4);
    insn += context.vm.regs.eip;

    while (1)
        {
        if (insn[i] == 0x66)
            {
            prefix.size = 1 - prefix.size;
            i++;
            }
        else if (insn[i] == 0xf3)
            {
            prefix.rep = 1;
            i++;
            }
        else if (insn[i] == 0xf0 || insn[i] == 0xf2
         || insn[i] == 0x26 || insn[i] == 0x2e
         || insn[i] == 0x36 || insn[i] == 0x3e
         || insn[i] == 0x64 || insn[i] == 0x65
         || insn[i] == 0x67)
            {
            /* these prefixes are just ignored */
            i++;
            }
        else if (insn[i] == 0x6c)
            {
            if (prefix.rep)
                em_rep_ins(1);
            else
                em_ins(1);
            i++;
            break;
            }
        else if (insn[i] == 0x6d)
            {
            if (prefix.rep)
                {
                if (prefix.size)
                    em_rep_ins(4);
                else
                    em_rep_ins(2);
                }
            else
                {
                if (prefix.size)
                    em_ins(4);
                else
                    em_ins(2);
                }
            i++;
            break;
            }
        else if (insn[i] == 0x6e)
            {
            if (prefix.rep)
                em_rep_outs(1);
            else
                em_outs(1);
            i++;
            break;
            }
        else if (insn[i] == 0x6f)
            {
            if (prefix.rep)
                {
                if (prefix.size)
                    em_rep_outs(4);
                else
                    em_rep_outs(2);
                }
            else
                {
                if (prefix.size)
                    em_outs(4);
                else
                    em_outs(2);
                }
            i++;
            break;
            }
        else if (insn[i] == 0xe4)
            {
            em_inbl(insn[i + 1]);
            i += 2;
            break;
            }
        else if (insn[i] == 0xe6)
            {
            em_outbl(insn[i + 1]);
            i += 2;
            break;
            }
        else if (insn[i] == 0xec)
            {
            em_inb();
            i++;
            break;
            }
        else if (insn[i] == 0xed)
            {
            if (prefix.size)
                em_inl();
            else
                em_inw();
            i++;
            break;
            }
        else if (insn[i] == 0xee)
            {
            em_outb();
            i++;
            break;
            }
        else if (insn[i] == 0xef)
            {
            if (prefix.size)
                em_outl();
            else
                em_outw();
            i++;
            break;
            }
        else
            return 0;
        }

    context.vm.regs.eip += i;
    return 1;
    }

/*
 I don't know how to make sure I get the right vm86() from libc.
 The one I want is syscall # 113 (vm86old() in libc 5, vm86() in glibc)
 which should be declared as "int vm86(struct vm86_struct *);" in
 <sys/vm86.h>.

 This just does syscall 113 with inline asm, which should work
 for both libcs (I hope).
*/
#if !defined(USE_LIBC_VM86)
static int
lrmi_vm86(struct vm86_struct *vm)
    {
    int r;
#ifdef __PIC__
    asm volatile (
     "pushl %%ebx\n\t"
     "movl %2, %%ebx\n\t"
     "int $0x80\n\t"
     "popl %%ebx"
     : "=a" (r)
     : "0" (113), "r" (vm));
#else
    asm volatile (
     "int $0x80"
     : "=a" (r)
     : "0" (113), "b" (vm));
#endif
    return r;
    }
#else
#define lrmi_vm86 vm86
#endif

static void
debug_info(int vret)
    {
    int i;
    unsigned char *p;

    fputs("vm86() failed\n", stderr);
    fprintf(stderr, "return = 0x%x\n", vret);
    fprintf(stderr, "eax = 0x%08lx\n", context.vm.regs.eax);
    fprintf(stderr, "ebx = 0x%08lx\n", context.vm.regs.ebx);
    fprintf(stderr, "ecx = 0x%08lx\n", context.vm.regs.ecx);
    fprintf(stderr, "edx = 0x%08lx\n", context.vm.regs.edx);
    fprintf(stderr, "esi = 0x%08lx\n", context.vm.regs.esi);
    fprintf(stderr, "edi = 0x%08lx\n", context.vm.regs.edi);
    fprintf(stderr, "ebp = 0x%08lx\n", context.vm.regs.ebp);
    fprintf(stderr, "eip = 0x%08lx\n", context.vm.regs.eip);
    fprintf(stderr, "cs = 0x%04x\n", context.vm.regs.cs);
    fprintf(stderr, "esp = 0x%08lx\n", context.vm.regs.esp);
    fprintf(stderr, "ss = 0x%04x\n", context.vm.regs.ss);
    fprintf(stderr, "ds = 0x%04x\n", context.vm.regs.ds);
    fprintf(stderr, "es = 0x%04x\n", context.vm.regs.es);
    fprintf(stderr, "fs = 0x%04x\n", context.vm.regs.fs);
    fprintf(stderr, "gs = 0x%04x\n", context.vm.regs.gs);
    fprintf(stderr, "eflags = 0x%08lx\n", context.vm.regs.eflags);

    fputs("cs:ip = [ ", stderr);

    p = (unsigned char *)((context.vm.regs.cs << 4) + (context.vm.regs.eip & 0xffff));

    for (i = 0; i < 16; ++i)
        fprintf(stderr, "%02x ", (unsigned int)p[i]);

    fputs("]\n", stderr);
    }

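/*
 Run the vm86 context until it executes the RETURN_TO_32_INT trap that
 LRMI_init() planted.  Any other software interrupt passed back to us is
 reflected through the real-mode IVT, and VM86_UNKNOWN faults (port I/O)
 are handed to emulate().  All signals are blocked and %fs/%gs are saved
 across the vm86() call.
*/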
static int
run_vm86(void)
    {
    unsigned int vret;
    sigset_t allsigs, cursigs;
    unsigned long oldgs, oldfs;

    while (1)
        {
        sigfillset(&allsigs);
        sigprocmask(SIG_SETMASK, &allsigs, &cursigs);
        asm volatile ("movl %%gs, %0" : "=g" (oldgs));
        asm volatile ("movl %%fs, %0" : "=g" (oldfs));
        vret = lrmi_vm86(&context.vm);
        asm volatile ("movl %0, %%fs" :: "g" (oldfs));
        asm volatile ("movl %0, %%gs" :: "g" (oldgs));
        sigprocmask(SIG_SETMASK, &cursigs, NULL);

        if (VM86_TYPE(vret) == VM86_INTx)
            {
            unsigned int v = VM86_ARG(vret);

            if (v == RETURN_TO_32_INT)
                return 1;

            pushw(context.vm.regs.eflags);
            pushw(context.vm.regs.cs);
            pushw(context.vm.regs.eip);

            context.vm.regs.cs = get_int_seg(v);
            context.vm.regs.eip = get_int_off(v);
            context.vm.regs.eflags &= ~(VIF_MASK | TF_MASK);

            continue;
            }

        if (VM86_TYPE(vret) != VM86_UNKNOWN)
            break;

        if (!emulate())
            break;
        }

#ifdef ORIGINAL_LRMI_CODE_THAT_GOT_IFDEFED_OUT
    debug_info(vret);
#endif
    return 0;
    }

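/*
 Far-call a real-mode procedure.  The return address pushed on the stack
 points at the int RETURN_TO_32_INT stub, so run_vm86() regains control
 when the routine executes retf.
*/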
int
LRMI_call(struct LRMI_regs *r)
    {
    unsigned int vret;

    memset(&context.vm.regs, 0, sizeof(context.vm.regs));

    set_regs(r);

    context.vm.regs.cs = r->cs;
    context.vm.regs.eip = r->ip;

    if (r->ss == 0 && r->sp == 0)
        {
        context.vm.regs.ss = context.stack_seg;
        context.vm.regs.esp = context.stack_off;
        }
    else
        {
        context.vm.regs.ss = r->ss;
        context.vm.regs.esp = r->sp;
        }

    pushw(context.ret_seg);
    pushw(context.ret_off);

    vret = run_vm86();

    get_regs(r);

    return vret;
    }

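/*
 Simulate a real-mode interrupt.  The flags and return address pushed
 here match what an int instruction would push, so the handler's iret
 lands on the RETURN_TO_32_INT stub.
*/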
int
LRMI_int(int i, struct LRMI_regs *r)
    {
    unsigned int vret;
    unsigned int seg, off;

    seg = get_int_seg(i);
    off = get_int_off(i);

    /*
     If the interrupt is in regular memory, it's probably
     still pointing at a dos TSR (which is now gone).
    */
    if (seg < 0xa000 || (seg << 4) + off >= 0x100000)
        {
#ifdef ORIGINAL_LRMI_CODE_THAT_GOT_IFDEFED_OUT
        fprintf(stderr, "Int 0x%x is not in rom (%04x:%04x)\n", i, seg, off);
#endif
        return 0;
        }

    memset(&context.vm.regs, 0, sizeof(context.vm.regs));

    set_regs(r);

    context.vm.regs.cs = seg;
    context.vm.regs.eip = off;

    if (r->ss == 0 && r->sp == 0)
        {
        context.vm.regs.ss = context.stack_seg;
        context.vm.regs.esp = context.stack_off;
        }
    else
        {
        context.vm.regs.ss = r->ss;
        context.vm.regs.esp = r->sp;
        }

    pushw(DEFAULT_VM86_FLAGS);
    pushw(context.ret_seg);
    pushw(context.ret_off);

    vret = run_vm86();

    get_regs(r);

    return vret;
    }

#endif