/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifndef CONFIG_USER_ONLY
38 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
39 #define MEMSUFFIX _kernel
41 #include "exec/cpu_ldst_template.h"
44 #include "exec/cpu_ldst_template.h"
47 #include "exec/cpu_ldst_template.h"
50 #include "exec/cpu_ldst_template.h"
55 /* return non zero if error */
56 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
57 uint32_t *e2_ptr
, int selector
)
68 index
= selector
& ~7;
69 if ((index
+ 7) > dt
->limit
) {
72 ptr
= dt
->base
+ index
;
73 *e1_ptr
= cpu_ldl_kernel(env
, ptr
);
74 *e2_ptr
= cpu_ldl_kernel(env
, ptr
+ 4);
78 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
82 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
83 if (e2
& DESC_G_MASK
) {
84 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor
 * words: base[15:0] in e1[31:16], base[23:16] in e2[7:0],
 * base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
94 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
97 sc
->base
= get_seg_base(e1
, e2
);
98 sc
->limit
= get_seg_limit(e1
, e2
);
102 /* init the segment cache in vm86 mode. */
103 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
107 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
108 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
109 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
112 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
113 uint32_t *esp_ptr
, int dpl
)
115 X86CPU
*cpu
= x86_env_get_cpu(env
);
116 int type
, index
, shift
;
121 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
122 for (i
= 0; i
< env
->tr
.limit
; i
++) {
123 printf("%02x ", env
->tr
.base
[i
]);
132 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
133 cpu_abort(CPU(cpu
), "invalid tss");
135 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
136 if ((type
& 7) != 1) {
137 cpu_abort(CPU(cpu
), "invalid tss type");
140 index
= (dpl
* 4 + 2) << shift
;
141 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
142 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
145 *esp_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
);
146 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 2);
148 *esp_ptr
= cpu_ldl_kernel(env
, env
->tr
.base
+ index
);
149 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 4);
153 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
)
158 if ((selector
& 0xfffc) != 0) {
159 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
160 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
162 if (!(e2
& DESC_S_MASK
)) {
163 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
166 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
167 if (seg_reg
== R_CS
) {
168 if (!(e2
& DESC_CS_MASK
)) {
169 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
172 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
174 } else if (seg_reg
== R_SS
) {
175 /* SS must be writable data */
176 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
177 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
179 if (dpl
!= cpl
|| dpl
!= rpl
) {
180 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
183 /* not readable code */
184 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
185 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
187 /* if data or non conforming code, checks the rights */
188 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
189 if (dpl
< cpl
|| dpl
< rpl
) {
190 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
194 if (!(e2
& DESC_P_MASK
)) {
195 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
197 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
198 get_seg_base(e1
, e2
),
199 get_seg_limit(e1
, e2
),
202 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
203 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
208 #define SWITCH_TSS_JMP 0
209 #define SWITCH_TSS_IRET 1
210 #define SWITCH_TSS_CALL 2
212 /* XXX: restore CPU state in registers (PowerPC case) */
213 static void switch_tss(CPUX86State
*env
, int tss_selector
,
214 uint32_t e1
, uint32_t e2
, int source
,
217 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
218 target_ulong tss_base
;
219 uint32_t new_regs
[8], new_segs
[6];
220 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
221 uint32_t old_eflags
, eflags_mask
;
226 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
227 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
230 /* if task gate, we read the TSS segment and we load it */
232 if (!(e2
& DESC_P_MASK
)) {
233 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
235 tss_selector
= e1
>> 16;
236 if (tss_selector
& 4) {
237 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
239 if (load_segment(env
, &e1
, &e2
, tss_selector
) != 0) {
240 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
242 if (e2
& DESC_S_MASK
) {
243 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
245 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
246 if ((type
& 7) != 1) {
247 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
251 if (!(e2
& DESC_P_MASK
)) {
252 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
260 tss_limit
= get_seg_limit(e1
, e2
);
261 tss_base
= get_seg_base(e1
, e2
);
262 if ((tss_selector
& 4) != 0 ||
263 tss_limit
< tss_limit_max
) {
264 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
266 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
268 old_tss_limit_max
= 103;
270 old_tss_limit_max
= 43;
273 /* read all the registers from the new TSS */
276 new_cr3
= cpu_ldl_kernel(env
, tss_base
+ 0x1c);
277 new_eip
= cpu_ldl_kernel(env
, tss_base
+ 0x20);
278 new_eflags
= cpu_ldl_kernel(env
, tss_base
+ 0x24);
279 for (i
= 0; i
< 8; i
++) {
280 new_regs
[i
] = cpu_ldl_kernel(env
, tss_base
+ (0x28 + i
* 4));
282 for (i
= 0; i
< 6; i
++) {
283 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x48 + i
* 4));
285 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x60);
286 new_trap
= cpu_ldl_kernel(env
, tss_base
+ 0x64);
290 new_eip
= cpu_lduw_kernel(env
, tss_base
+ 0x0e);
291 new_eflags
= cpu_lduw_kernel(env
, tss_base
+ 0x10);
292 for (i
= 0; i
< 8; i
++) {
293 new_regs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x12 + i
* 2)) |
296 for (i
= 0; i
< 4; i
++) {
297 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x22 + i
* 4));
299 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x2a);
304 /* XXX: avoid a compiler warning, see
305 http://support.amd.com/us/Processor_TechDocs/24593.pdf
306 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
309 /* NOTE: we must avoid memory exceptions during the task switch,
310 so we make dummy accesses before */
311 /* XXX: it can still fail in some cases, so a bigger hack is
312 necessary to valid the TLB after having done the accesses */
314 v1
= cpu_ldub_kernel(env
, env
->tr
.base
);
315 v2
= cpu_ldub_kernel(env
, env
->tr
.base
+ old_tss_limit_max
);
316 cpu_stb_kernel(env
, env
->tr
.base
, v1
);
317 cpu_stb_kernel(env
, env
->tr
.base
+ old_tss_limit_max
, v2
);
319 /* clear busy bit (it is restartable) */
320 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
324 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
325 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
326 e2
&= ~DESC_TSS_BUSY_MASK
;
327 cpu_stl_kernel(env
, ptr
+ 4, e2
);
329 old_eflags
= cpu_compute_eflags(env
);
330 if (source
== SWITCH_TSS_IRET
) {
331 old_eflags
&= ~NT_MASK
;
334 /* save the current state in the old TSS */
337 cpu_stl_kernel(env
, env
->tr
.base
+ 0x20, next_eip
);
338 cpu_stl_kernel(env
, env
->tr
.base
+ 0x24, old_eflags
);
339 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
]);
340 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
]);
341 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
]);
342 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
]);
343 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
]);
344 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
]);
345 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
]);
346 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
]);
347 for (i
= 0; i
< 6; i
++) {
348 cpu_stw_kernel(env
, env
->tr
.base
+ (0x48 + i
* 4),
349 env
->segs
[i
].selector
);
353 cpu_stw_kernel(env
, env
->tr
.base
+ 0x0e, next_eip
);
354 cpu_stw_kernel(env
, env
->tr
.base
+ 0x10, old_eflags
);
355 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
]);
356 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
]);
357 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
]);
358 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
]);
359 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
]);
360 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
]);
361 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
]);
362 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
]);
363 for (i
= 0; i
< 4; i
++) {
364 cpu_stw_kernel(env
, env
->tr
.base
+ (0x22 + i
* 4),
365 env
->segs
[i
].selector
);
369 /* now if an exception occurs, it will occurs in the next task
372 if (source
== SWITCH_TSS_CALL
) {
373 cpu_stw_kernel(env
, tss_base
, env
->tr
.selector
);
374 new_eflags
|= NT_MASK
;
378 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
382 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
383 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
384 e2
|= DESC_TSS_BUSY_MASK
;
385 cpu_stl_kernel(env
, ptr
+ 4, e2
);
388 /* set the new CPU state */
389 /* from this point, any exception which occurs can give problems */
390 env
->cr
[0] |= CR0_TS_MASK
;
391 env
->hflags
|= HF_TS_MASK
;
392 env
->tr
.selector
= tss_selector
;
393 env
->tr
.base
= tss_base
;
394 env
->tr
.limit
= tss_limit
;
395 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
397 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
398 cpu_x86_update_cr3(env
, new_cr3
);
401 /* load all registers without an exception, then reload them with
402 possible exception */
404 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
405 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
407 eflags_mask
&= 0xffff;
409 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
410 /* XXX: what to do in 16 bit case? */
411 env
->regs
[R_EAX
] = new_regs
[0];
412 env
->regs
[R_ECX
] = new_regs
[1];
413 env
->regs
[R_EDX
] = new_regs
[2];
414 env
->regs
[R_EBX
] = new_regs
[3];
415 env
->regs
[R_ESP
] = new_regs
[4];
416 env
->regs
[R_EBP
] = new_regs
[5];
417 env
->regs
[R_ESI
] = new_regs
[6];
418 env
->regs
[R_EDI
] = new_regs
[7];
419 if (new_eflags
& VM_MASK
) {
420 for (i
= 0; i
< 6; i
++) {
421 load_seg_vm(env
, i
, new_segs
[i
]);
424 /* first just selectors as the rest may trigger exceptions */
425 for (i
= 0; i
< 6; i
++) {
426 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
430 env
->ldt
.selector
= new_ldt
& ~4;
437 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
440 if ((new_ldt
& 0xfffc) != 0) {
442 index
= new_ldt
& ~7;
443 if ((index
+ 7) > dt
->limit
) {
444 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
446 ptr
= dt
->base
+ index
;
447 e1
= cpu_ldl_kernel(env
, ptr
);
448 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
449 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
450 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
452 if (!(e2
& DESC_P_MASK
)) {
453 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
455 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
458 /* load the segments */
459 if (!(new_eflags
& VM_MASK
)) {
460 int cpl
= new_segs
[R_CS
] & 3;
461 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
);
462 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
);
463 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
);
464 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
);
465 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
);
466 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
);
469 /* check that env->eip is in the CS segment limits */
470 if (new_eip
> env
->segs
[R_CS
].limit
) {
471 /* XXX: different exception if CALL? */
472 raise_exception_err(env
, EXCP0D_GPF
, 0);
475 #ifndef CONFIG_USER_ONLY
476 /* reset local breakpoints */
477 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
478 for (i
= 0; i
< DR7_MAX_BP
; i
++) {
479 if (hw_local_breakpoint_enabled(env
->dr
[7], i
) &&
480 !hw_global_breakpoint_enabled(env
->dr
[7], i
)) {
481 hw_breakpoint_remove(env
, i
);
484 env
->dr
[7] &= ~DR7_LOCAL_BP_MASK
;
489 static inline unsigned int get_sp_mask(unsigned int e2
)
491 if (e2
& DESC_B_MASK
) {
/* True for the x86 exception vectors that push an error code:
 * #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17).
 * NOTE(review): body reconstructed from the architecture definition
 * -- verify against upstream. */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
514 #define SET_ESP(val, sp_mask) \
516 if ((sp_mask) == 0xffff) { \
517 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
519 } else if ((sp_mask) == 0xffffffffLL) { \
520 env->regs[R_ESP] = (uint32_t)(val); \
522 env->regs[R_ESP] = (val); \
526 #define SET_ESP(val, sp_mask) \
528 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
529 ((val) & (sp_mask)); \
533 /* in 64-bit machines, this can overflow. So this segment addition macro
534 * can be used to trim the value to 32-bit whenever needed */
535 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
537 /* XXX: add a is_user flag to have proper security support */
538 #define PUSHW(ssp, sp, sp_mask, val) \
541 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
544 #define PUSHL(ssp, sp, sp_mask, val) \
547 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
550 #define POPW(ssp, sp, sp_mask, val) \
552 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
556 #define POPL(ssp, sp, sp_mask, val) \
558 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
562 /* protected mode interrupt */
563 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
564 int error_code
, unsigned int next_eip
,
568 target_ulong ptr
, ssp
;
569 int type
, dpl
, selector
, ss_dpl
, cpl
;
570 int has_error_code
, new_stack
, shift
;
571 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
572 uint32_t old_eip
, sp_mask
;
573 int vm86
= env
->eflags
& VM_MASK
;
576 if (!is_int
&& !is_hw
) {
577 has_error_code
= exception_has_error_code(intno
);
586 if (intno
* 8 + 7 > dt
->limit
) {
587 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
589 ptr
= dt
->base
+ intno
* 8;
590 e1
= cpu_ldl_kernel(env
, ptr
);
591 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
592 /* check gate type */
593 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
595 case 5: /* task gate */
596 /* must do that check here to return the correct error code */
597 if (!(e2
& DESC_P_MASK
)) {
598 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
600 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
601 if (has_error_code
) {
605 /* push the error code */
606 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
608 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
613 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
614 ssp
= env
->segs
[R_SS
].base
+ esp
;
616 cpu_stl_kernel(env
, ssp
, error_code
);
618 cpu_stw_kernel(env
, ssp
, error_code
);
623 case 6: /* 286 interrupt gate */
624 case 7: /* 286 trap gate */
625 case 14: /* 386 interrupt gate */
626 case 15: /* 386 trap gate */
629 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
632 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
633 cpl
= env
->hflags
& HF_CPL_MASK
;
634 /* check privilege if software int */
635 if (is_int
&& dpl
< cpl
) {
636 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
638 /* check valid bit */
639 if (!(e2
& DESC_P_MASK
)) {
640 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
643 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
644 if ((selector
& 0xfffc) == 0) {
645 raise_exception_err(env
, EXCP0D_GPF
, 0);
647 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
648 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
650 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
651 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
653 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
655 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
657 if (!(e2
& DESC_P_MASK
)) {
658 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
660 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
661 /* to inner privilege */
662 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
);
663 if ((ss
& 0xfffc) == 0) {
664 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
666 if ((ss
& 3) != dpl
) {
667 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
669 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
670 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
672 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
674 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
676 if (!(ss_e2
& DESC_S_MASK
) ||
677 (ss_e2
& DESC_CS_MASK
) ||
678 !(ss_e2
& DESC_W_MASK
)) {
679 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
681 if (!(ss_e2
& DESC_P_MASK
)) {
682 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
685 sp_mask
= get_sp_mask(ss_e2
);
686 ssp
= get_seg_base(ss_e1
, ss_e2
);
687 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
688 /* to same privilege */
690 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
693 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
694 ssp
= env
->segs
[R_SS
].base
;
695 esp
= env
->regs
[R_ESP
];
698 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
699 new_stack
= 0; /* avoid warning */
700 sp_mask
= 0; /* avoid warning */
701 ssp
= 0; /* avoid warning */
702 esp
= 0; /* avoid warning */
708 /* XXX: check that enough room is available */
709 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
718 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
719 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
720 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
721 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
723 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
724 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
726 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
727 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
728 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
729 if (has_error_code
) {
730 PUSHL(ssp
, esp
, sp_mask
, error_code
);
735 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
736 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
737 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
738 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
740 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
741 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
743 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
744 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
745 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
746 if (has_error_code
) {
747 PUSHW(ssp
, esp
, sp_mask
, error_code
);
751 /* interrupt gate clear IF mask */
752 if ((type
& 1) == 0) {
753 env
->eflags
&= ~IF_MASK
;
755 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
759 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
760 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
761 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
762 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
764 ss
= (ss
& ~3) | dpl
;
765 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
766 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
768 SET_ESP(esp
, sp_mask
);
770 selector
= (selector
& ~3) | dpl
;
771 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
772 get_seg_base(e1
, e2
),
773 get_seg_limit(e1
, e2
),
780 #define PUSHQ(sp, val) \
783 cpu_stq_kernel(env, sp, (val)); \
786 #define POPQ(sp, val) \
788 val = cpu_ldq_kernel(env, sp); \
792 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
794 X86CPU
*cpu
= x86_env_get_cpu(env
);
798 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
799 env
->tr
.base
, env
->tr
.limit
);
802 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
803 cpu_abort(CPU(cpu
), "invalid tss");
805 index
= 8 * level
+ 4;
806 if ((index
+ 7) > env
->tr
.limit
) {
807 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
809 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
812 /* 64 bit interrupt */
813 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
814 int error_code
, target_ulong next_eip
, int is_hw
)
818 int type
, dpl
, selector
, cpl
, ist
;
819 int has_error_code
, new_stack
;
820 uint32_t e1
, e2
, e3
, ss
;
821 target_ulong old_eip
, esp
, offset
;
824 if (!is_int
&& !is_hw
) {
825 has_error_code
= exception_has_error_code(intno
);
834 if (intno
* 16 + 15 > dt
->limit
) {
835 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
837 ptr
= dt
->base
+ intno
* 16;
838 e1
= cpu_ldl_kernel(env
, ptr
);
839 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
840 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
841 /* check gate type */
842 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
844 case 14: /* 386 interrupt gate */
845 case 15: /* 386 trap gate */
848 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
851 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
852 cpl
= env
->hflags
& HF_CPL_MASK
;
853 /* check privilege if software int */
854 if (is_int
&& dpl
< cpl
) {
855 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
857 /* check valid bit */
858 if (!(e2
& DESC_P_MASK
)) {
859 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
862 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
864 if ((selector
& 0xfffc) == 0) {
865 raise_exception_err(env
, EXCP0D_GPF
, 0);
868 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
869 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
871 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
872 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
874 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
876 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
878 if (!(e2
& DESC_P_MASK
)) {
879 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
881 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
882 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
884 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
885 /* to inner privilege */
887 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
889 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
890 /* to same privilege */
891 if (env
->eflags
& VM_MASK
) {
892 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
895 esp
= env
->regs
[R_ESP
];
898 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
899 new_stack
= 0; /* avoid warning */
900 esp
= 0; /* avoid warning */
902 esp
&= ~0xfLL
; /* align stack */
904 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
905 PUSHQ(esp
, env
->regs
[R_ESP
]);
906 PUSHQ(esp
, cpu_compute_eflags(env
));
907 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
909 if (has_error_code
) {
910 PUSHQ(esp
, error_code
);
913 /* interrupt gate clear IF mask */
914 if ((type
& 1) == 0) {
915 env
->eflags
&= ~IF_MASK
;
917 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
921 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
923 env
->regs
[R_ESP
] = esp
;
925 selector
= (selector
& ~3) | dpl
;
926 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
927 get_seg_base(e1
, e2
),
928 get_seg_limit(e1
, e2
),
935 #if defined(CONFIG_USER_ONLY)
936 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
938 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
940 cs
->exception_index
= EXCP_SYSCALL
;
941 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
945 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
949 if (!(env
->efer
& MSR_EFER_SCE
)) {
950 raise_exception_err(env
, EXCP06_ILLOP
, 0);
952 selector
= (env
->star
>> 32) & 0xffff;
953 if (env
->hflags
& HF_LMA_MASK
) {
956 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
957 env
->regs
[11] = cpu_compute_eflags(env
);
959 code64
= env
->hflags
& HF_CS64_MASK
;
961 env
->eflags
&= ~env
->fmask
;
962 cpu_load_eflags(env
, env
->eflags
, 0);
963 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
965 DESC_G_MASK
| DESC_P_MASK
|
967 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
969 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
971 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
973 DESC_W_MASK
| DESC_A_MASK
);
975 env
->eip
= env
->lstar
;
977 env
->eip
= env
->cstar
;
980 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
982 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
983 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
985 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
987 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
988 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
990 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
992 DESC_W_MASK
| DESC_A_MASK
);
993 env
->eip
= (uint32_t)env
->star
;
1000 void helper_sysret(CPUX86State
*env
, int dflag
)
1004 if (!(env
->efer
& MSR_EFER_SCE
)) {
1005 raise_exception_err(env
, EXCP06_ILLOP
, 0);
1007 cpl
= env
->hflags
& HF_CPL_MASK
;
1008 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1009 raise_exception_err(env
, EXCP0D_GPF
, 0);
1011 selector
= (env
->star
>> 48) & 0xffff;
1012 if (env
->hflags
& HF_LMA_MASK
) {
1013 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1014 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1017 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1019 DESC_G_MASK
| DESC_P_MASK
|
1020 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1021 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1023 env
->eip
= env
->regs
[R_ECX
];
1025 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1027 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1028 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1029 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1030 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1032 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1034 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1035 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1036 DESC_W_MASK
| DESC_A_MASK
);
1038 env
->eflags
|= IF_MASK
;
1039 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1041 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1042 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1043 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1044 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1045 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1047 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1048 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1049 DESC_W_MASK
| DESC_A_MASK
);
1054 /* real mode interrupt */
1055 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1056 int error_code
, unsigned int next_eip
)
1059 target_ulong ptr
, ssp
;
1061 uint32_t offset
, esp
;
1062 uint32_t old_cs
, old_eip
;
1064 /* real mode (simpler!) */
1066 if (intno
* 4 + 3 > dt
->limit
) {
1067 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1069 ptr
= dt
->base
+ intno
* 4;
1070 offset
= cpu_lduw_kernel(env
, ptr
);
1071 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1072 esp
= env
->regs
[R_ESP
];
1073 ssp
= env
->segs
[R_SS
].base
;
1079 old_cs
= env
->segs
[R_CS
].selector
;
1080 /* XXX: use SS segment size? */
1081 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1082 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1083 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1085 /* update processor state */
1086 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1088 env
->segs
[R_CS
].selector
= selector
;
1089 env
->segs
[R_CS
].base
= (selector
<< 4);
1090 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1093 #if defined(CONFIG_USER_ONLY)
1094 /* fake user mode interrupt */
1095 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1096 int error_code
, target_ulong next_eip
)
1100 int dpl
, cpl
, shift
;
1104 if (env
->hflags
& HF_LMA_MASK
) {
1109 ptr
= dt
->base
+ (intno
<< shift
);
1110 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1112 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1113 cpl
= env
->hflags
& HF_CPL_MASK
;
1114 /* check privilege if software int */
1115 if (is_int
&& dpl
< cpl
) {
1116 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1119 /* Since we emulate only user space, we cannot do more than
1120 exiting the emulation with the suitable exception and error
1121 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1122 if (is_int
|| intno
== EXCP_SYSCALL
) {
1123 env
->eip
= next_eip
;
1129 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1130 int error_code
, int is_hw
, int rm
)
1132 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1133 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1134 control
.event_inj
));
1136 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1140 type
= SVM_EVTINJ_TYPE_SOFT
;
1142 type
= SVM_EVTINJ_TYPE_EXEPT
;
1144 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1145 if (!rm
&& exception_has_error_code(intno
)) {
1146 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1147 stl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1148 control
.event_inj_err
),
1152 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1159 * Begin execution of an interruption. is_int is TRUE if coming from
1160 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1161 * instruction. It is only relevant if is_int is TRUE.
1163 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1164 int error_code
, target_ulong next_eip
, int is_hw
)
1166 CPUX86State
*env
= &cpu
->env
;
1168 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1169 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1172 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1173 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1174 count
, intno
, error_code
, is_int
,
1175 env
->hflags
& HF_CPL_MASK
,
1176 env
->segs
[R_CS
].selector
, env
->eip
,
1177 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1178 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1179 if (intno
== 0x0e) {
1180 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1182 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1185 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1192 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1193 for (i
= 0; i
< 16; i
++) {
1194 qemu_log(" %02x", ldub(ptr
+ i
));
1202 if (env
->cr
[0] & CR0_PE_MASK
) {
1203 #if !defined(CONFIG_USER_ONLY)
1204 if (env
->hflags
& HF_SVMI_MASK
) {
1205 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1208 #ifdef TARGET_X86_64
1209 if (env
->hflags
& HF_LMA_MASK
) {
1210 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1214 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1218 #if !defined(CONFIG_USER_ONLY)
1219 if (env
->hflags
& HF_SVMI_MASK
) {
1220 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1223 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1226 #if !defined(CONFIG_USER_ONLY)
1227 if (env
->hflags
& HF_SVMI_MASK
) {
1228 CPUState
*cs
= CPU(cpu
);
1229 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+
1230 offsetof(struct vmcb
,
1231 control
.event_inj
));
1234 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1235 event_inj
& ~SVM_EVTINJ_VALID
);
1240 void x86_cpu_do_interrupt(CPUState
*cs
)
1242 X86CPU
*cpu
= X86_CPU(cs
);
1243 CPUX86State
*env
= &cpu
->env
;
1245 #if defined(CONFIG_USER_ONLY)
1246 /* if user mode only, we simulate a fake exception
1247 which will be handled outside the cpu execution
1249 do_interrupt_user(env
, cs
->exception_index
,
1250 env
->exception_is_int
,
1252 env
->exception_next_eip
);
1253 /* successfully delivered */
1254 env
->old_exception
= -1;
1256 /* simulate a real cpu exception. On i386, it can
1257 trigger new exceptions, but we do not handle
1258 double or triple faults yet. */
1259 do_interrupt_all(cpu
, cs
->exception_index
,
1260 env
->exception_is_int
,
1262 env
->exception_next_eip
, 0);
1263 /* successfully delivered */
1264 env
->old_exception
= -1;
1268 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1270 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1273 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1275 X86CPU
*cpu
= X86_CPU(cs
);
1276 CPUX86State
*env
= &cpu
->env
;
1279 #if !defined(CONFIG_USER_ONLY)
1280 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1281 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1282 apic_poll_irq(cpu
->apic_state
);
1285 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1287 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1288 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1289 !(env
->hflags
& HF_SMM_MASK
)) {
1290 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1291 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1294 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1295 !(env
->hflags2
& HF2_NMI_MASK
)) {
1296 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1297 env
->hflags2
|= HF2_NMI_MASK
;
1298 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1300 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1301 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1302 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1304 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1305 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1306 (env
->hflags2
& HF2_HIF_MASK
)) ||
1307 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1308 (env
->eflags
& IF_MASK
&&
1309 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1311 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1312 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1313 CPU_INTERRUPT_VIRQ
);
1314 intno
= cpu_get_pic_interrupt(env
);
1315 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1316 "Servicing hardware INT=0x%02x\n", intno
);
1317 do_interrupt_x86_hardirq(env
, intno
, 1);
1318 /* ensure that no TB jump will be modified as
1319 the program flow was changed */
1321 #if !defined(CONFIG_USER_ONLY)
1322 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1323 (env
->eflags
& IF_MASK
) &&
1324 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1326 /* FIXME: this should respect TPR */
1327 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1328 intno
= ldl_phys(cs
->as
, env
->vm_vmcb
1329 + offsetof(struct vmcb
, control
.int_vector
));
1330 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1331 "Servicing virtual hardware INT=0x%02x\n", intno
);
1332 do_interrupt_x86_hardirq(env
, intno
, 1);
1333 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1342 void helper_enter_level(CPUX86State
*env
, int level
, int data32
,
1346 uint32_t esp_mask
, esp
, ebp
;
1348 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1349 ssp
= env
->segs
[R_SS
].base
;
1350 ebp
= env
->regs
[R_EBP
];
1351 esp
= env
->regs
[R_ESP
];
1358 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
),
1359 cpu_ldl_data(env
, ssp
+ (ebp
& esp_mask
)));
1362 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
), t1
);
1369 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
),
1370 cpu_lduw_data(env
, ssp
+ (ebp
& esp_mask
)));
1373 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
), t1
);
#ifdef TARGET_X86_64
/*
 * 64-bit ENTER, nesting level > 0 (64/16-bit operand flavors). Long-mode
 * stack pushes ignore the SS base and mask.
 * NOTE(review): reconstructed from a garbled extraction — verify against
 * upstream seg_helper.c.
 */
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
1410 void helper_lldt(CPUX86State
*env
, int selector
)
1414 int index
, entry_limit
;
1418 if ((selector
& 0xfffc) == 0) {
1419 /* XXX: NULL selector case: invalid LDT */
1423 if (selector
& 0x4) {
1424 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1427 index
= selector
& ~7;
1428 #ifdef TARGET_X86_64
1429 if (env
->hflags
& HF_LMA_MASK
) {
1436 if ((index
+ entry_limit
) > dt
->limit
) {
1437 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1439 ptr
= dt
->base
+ index
;
1440 e1
= cpu_ldl_kernel(env
, ptr
);
1441 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1442 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1443 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1445 if (!(e2
& DESC_P_MASK
)) {
1446 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1448 #ifdef TARGET_X86_64
1449 if (env
->hflags
& HF_LMA_MASK
) {
1452 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1453 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1454 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1458 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1461 env
->ldt
.selector
= selector
;
1464 void helper_ltr(CPUX86State
*env
, int selector
)
1468 int index
, type
, entry_limit
;
1472 if ((selector
& 0xfffc) == 0) {
1473 /* NULL selector case: invalid TR */
1478 if (selector
& 0x4) {
1479 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1482 index
= selector
& ~7;
1483 #ifdef TARGET_X86_64
1484 if (env
->hflags
& HF_LMA_MASK
) {
1491 if ((index
+ entry_limit
) > dt
->limit
) {
1492 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1494 ptr
= dt
->base
+ index
;
1495 e1
= cpu_ldl_kernel(env
, ptr
);
1496 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1497 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1498 if ((e2
& DESC_S_MASK
) ||
1499 (type
!= 1 && type
!= 9)) {
1500 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1502 if (!(e2
& DESC_P_MASK
)) {
1503 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1505 #ifdef TARGET_X86_64
1506 if (env
->hflags
& HF_LMA_MASK
) {
1509 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1510 e4
= cpu_ldl_kernel(env
, ptr
+ 12);
1511 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1512 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1514 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1515 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1519 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1521 e2
|= DESC_TSS_BUSY_MASK
;
1522 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1524 env
->tr
.selector
= selector
;
1527 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1528 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1537 cpl
= env
->hflags
& HF_CPL_MASK
;
1538 if ((selector
& 0xfffc) == 0) {
1539 /* null selector case */
1541 #ifdef TARGET_X86_64
1542 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1545 raise_exception_err(env
, EXCP0D_GPF
, 0);
1547 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1550 if (selector
& 0x4) {
1555 index
= selector
& ~7;
1556 if ((index
+ 7) > dt
->limit
) {
1557 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1559 ptr
= dt
->base
+ index
;
1560 e1
= cpu_ldl_kernel(env
, ptr
);
1561 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1563 if (!(e2
& DESC_S_MASK
)) {
1564 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1567 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1568 if (seg_reg
== R_SS
) {
1569 /* must be writable segment */
1570 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1571 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1573 if (rpl
!= cpl
|| dpl
!= cpl
) {
1574 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1577 /* must be readable segment */
1578 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1579 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1582 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1583 /* if not conforming code, test rights */
1584 if (dpl
< cpl
|| dpl
< rpl
) {
1585 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1590 if (!(e2
& DESC_P_MASK
)) {
1591 if (seg_reg
== R_SS
) {
1592 raise_exception_err(env
, EXCP0C_STACK
, selector
& 0xfffc);
1594 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1598 /* set the access bit if not already set */
1599 if (!(e2
& DESC_A_MASK
)) {
1601 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1604 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1605 get_seg_base(e1
, e2
),
1606 get_seg_limit(e1
, e2
),
1609 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1610 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1615 /* protected mode jump */
1616 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1617 int next_eip_addend
)
1620 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1621 target_ulong next_eip
;
1623 if ((new_cs
& 0xfffc) == 0) {
1624 raise_exception_err(env
, EXCP0D_GPF
, 0);
1626 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1627 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1629 cpl
= env
->hflags
& HF_CPL_MASK
;
1630 if (e2
& DESC_S_MASK
) {
1631 if (!(e2
& DESC_CS_MASK
)) {
1632 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1634 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1635 if (e2
& DESC_C_MASK
) {
1636 /* conforming code segment */
1638 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1641 /* non conforming code segment */
1644 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1647 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1650 if (!(e2
& DESC_P_MASK
)) {
1651 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1653 limit
= get_seg_limit(e1
, e2
);
1654 if (new_eip
> limit
&&
1655 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1656 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1658 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1659 get_seg_base(e1
, e2
), limit
, e2
);
1662 /* jump to call or task gate */
1663 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1665 cpl
= env
->hflags
& HF_CPL_MASK
;
1666 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1668 case 1: /* 286 TSS */
1669 case 9: /* 386 TSS */
1670 case 5: /* task gate */
1671 if (dpl
< cpl
|| dpl
< rpl
) {
1672 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1674 next_eip
= env
->eip
+ next_eip_addend
;
1675 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
1677 case 4: /* 286 call gate */
1678 case 12: /* 386 call gate */
1679 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1680 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1682 if (!(e2
& DESC_P_MASK
)) {
1683 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1686 new_eip
= (e1
& 0xffff);
1688 new_eip
|= (e2
& 0xffff0000);
1690 if (load_segment(env
, &e1
, &e2
, gate_cs
) != 0) {
1691 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1693 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1694 /* must be code segment */
1695 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1696 (DESC_S_MASK
| DESC_CS_MASK
))) {
1697 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1699 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1700 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1701 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1703 if (!(e2
& DESC_P_MASK
)) {
1704 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1706 limit
= get_seg_limit(e1
, e2
);
1707 if (new_eip
> limit
) {
1708 raise_exception_err(env
, EXCP0D_GPF
, 0);
1710 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1711 get_seg_base(e1
, e2
), limit
, e2
);
1715 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1721 /* real mode call */
1722 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1723 int shift
, int next_eip
)
1726 uint32_t esp
, esp_mask
;
1730 esp
= env
->regs
[R_ESP
];
1731 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1732 ssp
= env
->segs
[R_SS
].base
;
1734 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1735 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
1737 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1738 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
1741 SET_ESP(esp
, esp_mask
);
1743 env
->segs
[R_CS
].selector
= new_cs
;
1744 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1747 /* protected mode call */
1748 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1749 int shift
, int next_eip_addend
)
1752 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1753 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1754 uint32_t val
, limit
, old_sp_mask
;
1755 target_ulong ssp
, old_ssp
, next_eip
;
1757 next_eip
= env
->eip
+ next_eip_addend
;
1758 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1759 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1760 if ((new_cs
& 0xfffc) == 0) {
1761 raise_exception_err(env
, EXCP0D_GPF
, 0);
1763 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1764 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1766 cpl
= env
->hflags
& HF_CPL_MASK
;
1767 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1768 if (e2
& DESC_S_MASK
) {
1769 if (!(e2
& DESC_CS_MASK
)) {
1770 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1772 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1773 if (e2
& DESC_C_MASK
) {
1774 /* conforming code segment */
1776 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1779 /* non conforming code segment */
1782 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1785 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1788 if (!(e2
& DESC_P_MASK
)) {
1789 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1792 #ifdef TARGET_X86_64
1793 /* XXX: check 16/32 bit cases in long mode */
1798 rsp
= env
->regs
[R_ESP
];
1799 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
1800 PUSHQ(rsp
, next_eip
);
1801 /* from this point, not restartable */
1802 env
->regs
[R_ESP
] = rsp
;
1803 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1804 get_seg_base(e1
, e2
),
1805 get_seg_limit(e1
, e2
), e2
);
1810 sp
= env
->regs
[R_ESP
];
1811 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1812 ssp
= env
->segs
[R_SS
].base
;
1814 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1815 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1817 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1818 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1821 limit
= get_seg_limit(e1
, e2
);
1822 if (new_eip
> limit
) {
1823 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1825 /* from this point, not restartable */
1826 SET_ESP(sp
, sp_mask
);
1827 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1828 get_seg_base(e1
, e2
), limit
, e2
);
1832 /* check gate type */
1833 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1834 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1837 case 1: /* available 286 TSS */
1838 case 9: /* available 386 TSS */
1839 case 5: /* task gate */
1840 if (dpl
< cpl
|| dpl
< rpl
) {
1841 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1843 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
1845 case 4: /* 286 call gate */
1846 case 12: /* 386 call gate */
1849 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1854 if (dpl
< cpl
|| dpl
< rpl
) {
1855 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1857 /* check valid bit */
1858 if (!(e2
& DESC_P_MASK
)) {
1859 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1861 selector
= e1
>> 16;
1862 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1863 param_count
= e2
& 0x1f;
1864 if ((selector
& 0xfffc) == 0) {
1865 raise_exception_err(env
, EXCP0D_GPF
, 0);
1868 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
1869 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1871 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1872 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1874 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1876 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1878 if (!(e2
& DESC_P_MASK
)) {
1879 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1882 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1883 /* to inner privilege */
1884 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
);
1885 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1886 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1888 if ((ss
& 0xfffc) == 0) {
1889 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1891 if ((ss
& 3) != dpl
) {
1892 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1894 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
1895 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1897 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1898 if (ss_dpl
!= dpl
) {
1899 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1901 if (!(ss_e2
& DESC_S_MASK
) ||
1902 (ss_e2
& DESC_CS_MASK
) ||
1903 !(ss_e2
& DESC_W_MASK
)) {
1904 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1906 if (!(ss_e2
& DESC_P_MASK
)) {
1907 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1910 /* push_size = ((param_count * 2) + 8) << shift; */
1912 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1913 old_ssp
= env
->segs
[R_SS
].base
;
1915 sp_mask
= get_sp_mask(ss_e2
);
1916 ssp
= get_seg_base(ss_e1
, ss_e2
);
1918 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1919 PUSHL(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1920 for (i
= param_count
- 1; i
>= 0; i
--) {
1921 val
= cpu_ldl_kernel(env
, old_ssp
+
1922 ((env
->regs
[R_ESP
] + i
* 4) &
1924 PUSHL(ssp
, sp
, sp_mask
, val
);
1927 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1928 PUSHW(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1929 for (i
= param_count
- 1; i
>= 0; i
--) {
1930 val
= cpu_lduw_kernel(env
, old_ssp
+
1931 ((env
->regs
[R_ESP
] + i
* 2) &
1933 PUSHW(ssp
, sp
, sp_mask
, val
);
1938 /* to same privilege */
1939 sp
= env
->regs
[R_ESP
];
1940 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1941 ssp
= env
->segs
[R_SS
].base
;
1942 /* push_size = (4 << shift); */
1947 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1948 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1950 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1951 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1954 /* from this point, not restartable */
1957 ss
= (ss
& ~3) | dpl
;
1958 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1960 get_seg_limit(ss_e1
, ss_e2
),
1964 selector
= (selector
& ~3) | dpl
;
1965 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1966 get_seg_base(e1
, e2
),
1967 get_seg_limit(e1
, e2
),
1969 SET_ESP(sp
, sp_mask
);
1974 /* real and vm86 mode iret */
1975 void helper_iret_real(CPUX86State
*env
, int shift
)
1977 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
1981 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1982 sp
= env
->regs
[R_ESP
];
1983 ssp
= env
->segs
[R_SS
].base
;
1986 POPL(ssp
, sp
, sp_mask
, new_eip
);
1987 POPL(ssp
, sp
, sp_mask
, new_cs
);
1989 POPL(ssp
, sp
, sp_mask
, new_eflags
);
1992 POPW(ssp
, sp
, sp_mask
, new_eip
);
1993 POPW(ssp
, sp
, sp_mask
, new_cs
);
1994 POPW(ssp
, sp
, sp_mask
, new_eflags
);
1996 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
1997 env
->segs
[R_CS
].selector
= new_cs
;
1998 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2000 if (env
->eflags
& VM_MASK
) {
2001 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
2004 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
2008 eflags_mask
&= 0xffff;
2010 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2011 env
->hflags2
&= ~HF2_NMI_MASK
;
2014 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
2019 /* XXX: on x86_64, we do not want to nullify FS and GS because
2020 they may still contain a valid base. I would be interested to
2021 know how a real x86_64 CPU behaves */
2022 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2023 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2027 e2
= env
->segs
[seg_reg
].flags
;
2028 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2029 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2030 /* data or non conforming code segment */
2032 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2037 /* protected mode iret */
2038 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2039 int is_iret
, int addend
)
2041 uint32_t new_cs
, new_eflags
, new_ss
;
2042 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2043 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2044 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2045 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2047 #ifdef TARGET_X86_64
2053 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2055 sp
= env
->regs
[R_ESP
];
2056 ssp
= env
->segs
[R_SS
].base
;
2057 new_eflags
= 0; /* avoid warning */
2058 #ifdef TARGET_X86_64
2064 POPQ(sp
, new_eflags
);
2071 POPL(ssp
, sp
, sp_mask
, new_eip
);
2072 POPL(ssp
, sp
, sp_mask
, new_cs
);
2075 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2076 if (new_eflags
& VM_MASK
) {
2077 goto return_to_vm86
;
2082 POPW(ssp
, sp
, sp_mask
, new_eip
);
2083 POPW(ssp
, sp
, sp_mask
, new_cs
);
2085 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2089 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2090 new_cs
, new_eip
, shift
, addend
);
2091 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2092 if ((new_cs
& 0xfffc) == 0) {
2093 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2095 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
2096 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2098 if (!(e2
& DESC_S_MASK
) ||
2099 !(e2
& DESC_CS_MASK
)) {
2100 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2102 cpl
= env
->hflags
& HF_CPL_MASK
;
2105 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2107 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2108 if (e2
& DESC_C_MASK
) {
2110 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2114 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2117 if (!(e2
& DESC_P_MASK
)) {
2118 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
2122 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2123 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2124 /* return to same privilege level */
2125 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2126 get_seg_base(e1
, e2
),
2127 get_seg_limit(e1
, e2
),
2130 /* return to different privilege level */
2131 #ifdef TARGET_X86_64
2141 POPL(ssp
, sp
, sp_mask
, new_esp
);
2142 POPL(ssp
, sp
, sp_mask
, new_ss
);
2146 POPW(ssp
, sp
, sp_mask
, new_esp
);
2147 POPW(ssp
, sp
, sp_mask
, new_ss
);
2150 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2152 if ((new_ss
& 0xfffc) == 0) {
2153 #ifdef TARGET_X86_64
2154 /* NULL ss is allowed in long mode if cpl != 3 */
2155 /* XXX: test CS64? */
2156 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2157 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2159 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2160 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2161 DESC_W_MASK
| DESC_A_MASK
);
2162 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2166 raise_exception_err(env
, EXCP0D_GPF
, 0);
2169 if ((new_ss
& 3) != rpl
) {
2170 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2172 if (load_segment(env
, &ss_e1
, &ss_e2
, new_ss
) != 0) {
2173 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2175 if (!(ss_e2
& DESC_S_MASK
) ||
2176 (ss_e2
& DESC_CS_MASK
) ||
2177 !(ss_e2
& DESC_W_MASK
)) {
2178 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2180 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2182 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2184 if (!(ss_e2
& DESC_P_MASK
)) {
2185 raise_exception_err(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc);
2187 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2188 get_seg_base(ss_e1
, ss_e2
),
2189 get_seg_limit(ss_e1
, ss_e2
),
2193 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2194 get_seg_base(e1
, e2
),
2195 get_seg_limit(e1
, e2
),
2198 #ifdef TARGET_X86_64
2199 if (env
->hflags
& HF_CS64_MASK
) {
2204 sp_mask
= get_sp_mask(ss_e2
);
2207 /* validate data segments */
2208 validate_seg(env
, R_ES
, rpl
);
2209 validate_seg(env
, R_DS
, rpl
);
2210 validate_seg(env
, R_FS
, rpl
);
2211 validate_seg(env
, R_GS
, rpl
);
2215 SET_ESP(sp
, sp_mask
);
2218 /* NOTE: 'cpl' is the _old_ CPL */
2219 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2221 eflags_mask
|= IOPL_MASK
;
2223 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2225 eflags_mask
|= IF_MASK
;
2228 eflags_mask
&= 0xffff;
2230 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2235 POPL(ssp
, sp
, sp_mask
, new_esp
);
2236 POPL(ssp
, sp
, sp_mask
, new_ss
);
2237 POPL(ssp
, sp
, sp_mask
, new_es
);
2238 POPL(ssp
, sp
, sp_mask
, new_ds
);
2239 POPL(ssp
, sp
, sp_mask
, new_fs
);
2240 POPL(ssp
, sp
, sp_mask
, new_gs
);
2242 /* modify processor state */
2243 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2244 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2246 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2247 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2248 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2249 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2250 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2251 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2253 env
->eip
= new_eip
& 0xffff;
2254 env
->regs
[R_ESP
] = new_esp
;
2257 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2259 int tss_selector
, type
;
2262 /* specific case for TSS */
2263 if (env
->eflags
& NT_MASK
) {
2264 #ifdef TARGET_X86_64
2265 if (env
->hflags
& HF_LMA_MASK
) {
2266 raise_exception_err(env
, EXCP0D_GPF
, 0);
2269 tss_selector
= cpu_lduw_kernel(env
, env
->tr
.base
+ 0);
2270 if (tss_selector
& 4) {
2271 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2273 if (load_segment(env
, &e1
, &e2
, tss_selector
) != 0) {
2274 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2276 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2277 /* NOTE: we check both segment and busy TSS */
2279 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2281 switch_tss(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2283 helper_ret_protected(env
, shift
, 1, 0);
2285 env
->hflags2
&= ~HF2_NMI_MASK
;
2288 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2290 helper_ret_protected(env
, shift
, 0, addend
);
2293 void helper_sysenter(CPUX86State
*env
)
2295 if (env
->sysenter_cs
== 0) {
2296 raise_exception_err(env
, EXCP0D_GPF
, 0);
2298 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2300 #ifdef TARGET_X86_64
2301 if (env
->hflags
& HF_LMA_MASK
) {
2302 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2304 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2306 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2311 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2313 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2315 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2317 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2319 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2321 DESC_W_MASK
| DESC_A_MASK
);
2322 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2323 env
->eip
= env
->sysenter_eip
;
2326 void helper_sysexit(CPUX86State
*env
, int dflag
)
2330 cpl
= env
->hflags
& HF_CPL_MASK
;
2331 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2332 raise_exception_err(env
, EXCP0D_GPF
, 0);
2334 #ifdef TARGET_X86_64
2336 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2338 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2339 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2340 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2342 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2344 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2345 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2346 DESC_W_MASK
| DESC_A_MASK
);
2350 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2352 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2353 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2354 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2355 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2357 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2358 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2359 DESC_W_MASK
| DESC_A_MASK
);
2361 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2362 env
->eip
= env
->regs
[R_EDX
];
2365 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2368 uint32_t e1
, e2
, eflags
, selector
;
2369 int rpl
, dpl
, cpl
, type
;
2371 selector
= selector1
& 0xffff;
2372 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2373 if ((selector
& 0xfffc) == 0) {
2376 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2380 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2381 cpl
= env
->hflags
& HF_CPL_MASK
;
2382 if (e2
& DESC_S_MASK
) {
2383 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2386 if (dpl
< cpl
|| dpl
< rpl
) {
2391 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2402 if (dpl
< cpl
|| dpl
< rpl
) {
2404 CC_SRC
= eflags
& ~CC_Z
;
2408 limit
= get_seg_limit(e1
, e2
);
2409 CC_SRC
= eflags
| CC_Z
;
2413 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2415 uint32_t e1
, e2
, eflags
, selector
;
2416 int rpl
, dpl
, cpl
, type
;
2418 selector
= selector1
& 0xffff;
2419 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2420 if ((selector
& 0xfffc) == 0) {
2423 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2427 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2428 cpl
= env
->hflags
& HF_CPL_MASK
;
2429 if (e2
& DESC_S_MASK
) {
2430 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2433 if (dpl
< cpl
|| dpl
< rpl
) {
2438 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2452 if (dpl
< cpl
|| dpl
< rpl
) {
2454 CC_SRC
= eflags
& ~CC_Z
;
2458 CC_SRC
= eflags
| CC_Z
;
2459 return e2
& 0x00f0ff00;
2462 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2464 uint32_t e1
, e2
, eflags
, selector
;
2467 selector
= selector1
& 0xffff;
2468 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2469 if ((selector
& 0xfffc) == 0) {
2472 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2475 if (!(e2
& DESC_S_MASK
)) {
2479 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2480 cpl
= env
->hflags
& HF_CPL_MASK
;
2481 if (e2
& DESC_CS_MASK
) {
2482 if (!(e2
& DESC_R_MASK
)) {
2485 if (!(e2
& DESC_C_MASK
)) {
2486 if (dpl
< cpl
|| dpl
< rpl
) {
2491 if (dpl
< cpl
|| dpl
< rpl
) {
2493 CC_SRC
= eflags
& ~CC_Z
;
2497 CC_SRC
= eflags
| CC_Z
;
2500 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2502 uint32_t e1
, e2
, eflags
, selector
;
2505 selector
= selector1
& 0xffff;
2506 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2507 if ((selector
& 0xfffc) == 0) {
2510 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2513 if (!(e2
& DESC_S_MASK
)) {
2517 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2518 cpl
= env
->hflags
& HF_CPL_MASK
;
2519 if (e2
& DESC_CS_MASK
) {
2522 if (dpl
< cpl
|| dpl
< rpl
) {
2525 if (!(e2
& DESC_W_MASK
)) {
2527 CC_SRC
= eflags
& ~CC_Z
;
2531 CC_SRC
= eflags
| CC_Z
;
#if defined(CONFIG_USER_ONLY)
/*
 * User-mode helper: real/VM86 mode gets a flat selector<<4 base; protected
 * mode goes through the full descriptor load.
 * NOTE(review): reconstructed from a garbled extraction — verify against
 * upstream seg_helper.c.
 */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
2550 /* check if Port I/O is allowed in TSS */
2551 static inline void check_io(CPUX86State
*env
, int addr
, int size
)
2553 int io_offset
, val
, mask
;
2555 /* TSS must be a valid 32 bit one */
2556 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
2557 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
2558 env
->tr
.limit
< 103) {
2561 io_offset
= cpu_lduw_kernel(env
, env
->tr
.base
+ 0x66);
2562 io_offset
+= (addr
>> 3);
2563 /* Note: the check needs two bytes */
2564 if ((io_offset
+ 1) > env
->tr
.limit
) {
2567 val
= cpu_lduw_kernel(env
, env
->tr
.base
+ io_offset
);
2569 mask
= (1 << size
) - 1;
2570 /* all bits must be zero to allow the I/O */
2571 if ((val
& mask
) != 0) {
2573 raise_exception_err(env
, EXCP0D_GPF
, 0);
2577 void helper_check_iob(CPUX86State
*env
, uint32_t t0
)
2579 check_io(env
, t0
, 1);
2582 void helper_check_iow(CPUX86State
*env
, uint32_t t0
)
2584 check_io(env
, t0
, 2);
2587 void helper_check_iol(CPUX86State
*env
, uint32_t t0
)
2589 check_io(env
, t0
, 4);