/*
 *  vm86 linux syscall support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"

//#define DEBUG_VM86

#ifdef DEBUG_VM86
#  define LOG_VM86(...) qemu_log(__VA_ARGS__);
#else
#  define LOG_VM86(...) do { } while (0)
#endif
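/*
 * set_flags() rewrites only the bits of X selected by 'mask'. Decoding
 * the constants against the x86 EFLAGS layout (an interpretation, not
 * spelled out in the original source): SAFE_MASK (0xDD5) selects CF, PF,
 * AF, ZF, SF, TF, DF and OF, the flags vm86 code may change directly;
 * RETURN_MASK (0xDFF) additionally keeps the fixed reserved low bits.
 * Both deliberately exclude IF (bit 9), which is virtualized in
 * ts->v86flags, and the privileged IOPL/NT bits.
 */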
#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK   (0xDD5)
#define RETURN_MASK (0xDFF)
static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    return (((uint8_t *)bitmap)[nr >> 3] >> (nr & 7)) & 1;
}
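/*
 * The vm_putX()/vm_getX() helpers access guest memory with real-mode
 * segment:offset addressing: the linear address is the segment base
 * (selector << 4, precomputed by the callers as 'segptr') plus a 16-bit
 * offset, hence the '& 0xffff' wrap of 'reg16'.
 */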
static inline void vm_putw(CPUX86State *env, uint32_t segptr,
                           unsigned int reg16, unsigned int val)
{
    cpu_stw_data(env, segptr + (reg16 & 0xffff), val);
}

static inline void vm_putl(CPUX86State *env, uint32_t segptr,
                           unsigned int reg16, unsigned int val)
{
    cpu_stl_data(env, segptr + (reg16 & 0xffff), val);
}

static inline unsigned int vm_getb(CPUX86State *env,
                                   uint32_t segptr, unsigned int reg16)
{
    return cpu_ldub_data(env, segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getw(CPUX86State *env,
                                   uint32_t segptr, unsigned int reg16)
{
    return cpu_lduw_data(env, segptr + (reg16 & 0xffff));
}

static inline unsigned int vm_getl(CPUX86State *env,
                                   uint32_t segptr, unsigned int reg16)
{
    return cpu_ldl_data(env, segptr + (reg16 & 0xffff));
}
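/*
 * Leave vm86 mode: copy the vm86 register image back into the
 * user-supplied vm86plus structure, then restore the 32-bit CPU state
 * that do_vm86() saved when entering vm86 mode.
 */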
void save_v86_state(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;
    struct target_vm86plus_struct * target_v86;

    if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
        /* FIXME - should return an error */
        return;
    /* put the VM86 registers in the userspace register structure */
    target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    target_v86->regs.eip = tswap32(env->eip);
    target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
    target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
    target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
    target_v86->regs.es = tswap16(env->segs[R_ES].selector);
    target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
    target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    target_v86->regs.eflags = tswap32(env->eflags);
    unlock_user_struct(target_v86, ts->target_v86, 1);
    LOG_VM86("save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
             env->eflags, env->segs[R_CS].selector, env->eip);

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}
/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
    LOG_VM86("return_to_32bit: ret=0x%x\n", retval);
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}
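/*
 * set_IF()/clear_IF() track the guest's interrupt flag in the virtual
 * VIF bit of ts->v86flags rather than in the real EFLAGS.IF. If the
 * 32-bit monitor has flagged a pending interrupt (VIP), enabling
 * interrupts forces an immediate exit with TARGET_VM86_STI.
 */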
static inline int set_IF(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;

    ts->v86flags |= VIF_MASK;
    if (ts->v86flags & VIP_MASK) {
        return_to_32bit(env, TARGET_VM86_STI);
        return 1;
    }
    return 0;
}
static inline void clear_IF(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;

    ts->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(CPUX86State *env)
{
    env->eflags &= ~TF_MASK;
}

static inline void clear_AC(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;

    set_flags(ts->v86flags, eflags, ts->v86mask);
    set_flags(env->eflags, eflags, SAFE_MASK);
    if (eflags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}
static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;

    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    set_flags(env->eflags, flags, SAFE_MASK);
    if (flags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}
static inline unsigned int get_vflags(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;
    unsigned int flags;

    flags = env->eflags & RETURN_MASK;
    if (ts->v86flags & VIF_MASK)
        flags |= IF_MASK;
    return flags | (ts->v86flags & ts->v86mask);
}
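/*
 * ADD16() adds 'val' to only the low 16 bits of a 32-bit register,
 * emulating the 16-bit wrap-around of SP/IP in real mode while leaving
 * the high half of the register untouched.
 */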
#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)
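/*
 * Real-mode interrupt dispatch reads the 4-byte vector at physical
 * address intno * 4 (segment in the high 16 bits, offset in the low 16),
 * pushes FLAGS, CS and IP, and jumps to the handler. do_int() below
 * follows that sequence unless the interrupt is revectored back to the
 * 32-bit monitor.
 */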
/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;
    uint32_t int_addr, segoffs, ssp;
    unsigned int sp;

    if (env->segs[R_CS].selector == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->vm86plus.int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->vm86plus.int21_revectored))
        goto cannot_handle;
    int_addr = (intno << 2);
    segoffs = cpu_ldl_data(env, int_addr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
    LOG_VM86("VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
             intno, segoffs >> 16, segoffs & 0xffff);
    /* save old state */
    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;
    vm_putw(env, ssp, sp - 2, get_vflags(env));
    vm_putw(env, ssp, sp - 4, env->segs[R_CS].selector);
    vm_putw(env, ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
    LOG_VM86("VM86: return to 32 bits int 0x%x\n", intno);
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}
void handle_vm86_trap(CPUX86State *env, int trapno)
{
    if (trapno == 1 || trapno == 3) {
        return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    } else {
        do_int(env, trapno);
    }
}
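/*
 * The two helper macros below are used by handle_vm86_fault() and expect
 * the local variables 'ts', 'env' and 'newflags' to be in scope:
 * CHECK_IF_IN_TRAP() re-arms the single-step flag on behalf of the vm86
 * debugger, and VM86_FAULT_RETURN exits to the 32-bit monitor when it
 * asked to be notified as soon as the guest can accept a PIC interrupt.
 */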
#define CHECK_IF_IN_TRAP() \
      if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
          (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
              newflags |= TF_MASK
#define VM86_FAULT_RETURN \
        if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
            (ts->v86flags & (IF_MASK | VIF_MASK))) \
            return_to_32bit(env, TARGET_VM86_PICRETURN); \
        return
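/*
 * In vm86 mode, privileged instructions such as pushf/popf, int/iret and
 * cli/sti raise a general protection fault; handle_vm86_fault() decodes
 * the faulting instruction (skipping any prefixes) and emulates it using
 * the virtualized flags.
 */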
void handle_vm86_fault(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;
    uint32_t csp, ssp;
    unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    int data32, pref_done;

    csp = env->segs[R_CS].selector << 4;
    ip = env->eip & 0xffff;

    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;

    LOG_VM86("VM86 exception %04x:%08x\n",
             env->segs[R_CS].selector, env->eip);

    data32 = 0;
    pref_done = 0;
    do {
        opcode = vm_getb(env, csp, ip);
        ADD16(ip, 1);
        switch (opcode) {
        case 0x66:      /* 32-bit data */     data32 = 1; break;
        case 0x67:      /* 32-bit address */  break;
        case 0x2e:      /* CS */              break;
        case 0x3e:      /* DS */              break;
        case 0x26:      /* ES */              break;
        case 0x36:      /* SS */              break;
        case 0x65:      /* GS */              break;
        case 0x64:      /* FS */              break;
        case 0xf2:      /* repnz */           break;
        case 0xf3:      /* rep */             break;
        default: pref_done = 1;
        }
    } while (!pref_done);

    /* VM86 mode */
    switch (opcode) {
    case 0x9c: /* pushf */
        if (data32) {
            vm_putl(env, ssp, sp - 4, get_vflags(env));
            ADD16(env->regs[R_ESP], -4);
        } else {
            vm_putw(env, ssp, sp - 2, get_vflags(env));
            ADD16(env->regs[R_ESP], -2);
        }
        env->eip = ip;
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        if (data32) {
            newflags = vm_getl(env, ssp, sp);
            ADD16(env->regs[R_ESP], 4);
        } else {
            newflags = vm_getw(env, ssp, sp);
            ADD16(env->regs[R_ESP], 2);
        }
        env->eip = ip;
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        intno = vm_getb(env, csp, ip);
        ADD16(ip, 1);
        env->eip = ip;
        if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
            if ((ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
                 (intno & 7)) & 1) {
                return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
                return;
            }
        }
        do_int(env, intno);
        break;

    case 0xcf: /* iret */
        if (data32) {
            newip = vm_getl(env, ssp, sp) & 0xffff;
            newcs = vm_getl(env, ssp, sp + 4) & 0xffff;
            newflags = vm_getl(env, ssp, sp + 8);
            ADD16(env->regs[R_ESP], 12);
        } else {
            newip = vm_getw(env, ssp, sp);
            newcs = vm_getw(env, ssp, sp + 2);
            newflags = vm_getw(env, ssp, sp + 4);
            ADD16(env->regs[R_ESP], 6);
        }
        env->eip = newip;
        cpu_x86_load_seg(env, R_CS, newcs);
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        env->eip = ip;
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        env->eip = ip;
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}
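/*
 * Entry point for the vm86/vm86plus syscall: saves the current 32-bit
 * CPU state, loads the vm86 register image supplied by the guest and
 * switches the virtual CPU into vm86 mode.
 */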
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    TaskState *ts = cs->opaque;
    struct target_vm86plus_struct * target_v86;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -TARGET_EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works. */
        ret = 0;
        goto out;
    }

    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    ts->vm86_saved_regs.gs = env->segs[R_GS].selector;

    ts->target_v86 = vm86_addr;
    if (!lock_user_struct(VERIFY_READ, target_v86, vm86_addr, 1))
        return -TARGET_EFAULT;
    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;

    ts->vm86plus.cpu_type = tswapal(target_v86->cpu_type);
    switch (ts->vm86plus.cpu_type) {
    case TARGET_CPU_286:
        ts->v86mask = 0;
        break;
    case TARGET_CPU_386:
        ts->v86mask = NT_MASK | IOPL_MASK;
        break;
    case TARGET_CPU_486:
        ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
        break;
    default:
        ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
        break;
    }

    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
    memcpy(&ts->vm86plus.int_revectored,
           &target_v86->int_revectored, 32);
    memcpy(&ts->vm86plus.int21_revectored,
           &target_v86->int21_revectored, 32);
    ts->vm86plus.vm86plus.flags = tswapal(target_v86->vm86plus.flags);
    memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
           target_v86->vm86plus.vm86dbg_intxxtab, 32);
    unlock_user_struct(target_v86, vm86_addr, 0);

    LOG_VM86("do_vm86: cs:ip=%04x:%04x\n",
             env->segs[R_CS].selector, env->eip);
    /* now the virtual CPU is ready for vm86 execution ! */
 out:
    return ret;
}
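/*
 * Guest-side usage sketch (for orientation only; names follow the Linux
 * <asm/vm86.h> API, and the segment/offset values are hypothetical):
 *
 *     struct vm86plus_struct v86 = { 0 };
 *     v86.regs.cs  = 0x1000;  v86.regs.eip = 0x0000;
 *     v86.regs.ss  = 0x2000;  v86.regs.esp = 0xfffe;
 *     int status = vm86(VM86_ENTER, &v86);
 *
 * The call returns when the monitor must intervene, with VM86_TYPE(status)
 * indicating why (e.g. the TARGET_VM86_INTx and TARGET_VM86_TRAP cases
 * produced above).
 */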