2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
40 #include "exec/cpu_ldst_useronly_template.h"
43 #include "exec/cpu_ldst_useronly_template.h"
46 #include "exec/cpu_ldst_useronly_template.h"
49 #include "exec/cpu_ldst_useronly_template.h"
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
55 #include "exec/cpu_ldst_template.h"
58 #include "exec/cpu_ldst_template.h"
61 #include "exec/cpu_ldst_template.h"
64 #include "exec/cpu_ldst_template.h"
69 /* return non zero if error */
70 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
71 uint32_t *e2_ptr
, int selector
)
82 index
= selector
& ~7;
83 if ((index
+ 7) > dt
->limit
) {
86 ptr
= dt
->base
+ index
;
87 *e1_ptr
= cpu_ldl_kernel(env
, ptr
);
88 *e2_ptr
= cpu_ldl_kernel(env
, ptr
+ 4);
92 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
96 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
97 if (e2
& DESC_G_MASK
) {
98 limit
= (limit
<< 12) | 0xfff;
103 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
105 return (e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000);
108 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
111 sc
->base
= get_seg_base(e1
, e2
);
112 sc
->limit
= get_seg_limit(e1
, e2
);
116 /* init the segment cache in vm86 mode. */
117 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
121 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
122 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
123 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
126 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
127 uint32_t *esp_ptr
, int dpl
)
129 X86CPU
*cpu
= x86_env_get_cpu(env
);
130 int type
, index
, shift
;
135 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
136 for (i
= 0; i
< env
->tr
.limit
; i
++) {
137 printf("%02x ", env
->tr
.base
[i
]);
146 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
147 cpu_abort(CPU(cpu
), "invalid tss");
149 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
150 if ((type
& 7) != 1) {
151 cpu_abort(CPU(cpu
), "invalid tss type");
154 index
= (dpl
* 4 + 2) << shift
;
155 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
156 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
159 *esp_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
);
160 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 2);
162 *esp_ptr
= cpu_ldl_kernel(env
, env
->tr
.base
+ index
);
163 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 4);
167 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
)
172 if ((selector
& 0xfffc) != 0) {
173 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
174 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
176 if (!(e2
& DESC_S_MASK
)) {
177 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
180 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
181 if (seg_reg
== R_CS
) {
182 if (!(e2
& DESC_CS_MASK
)) {
183 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
186 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
188 } else if (seg_reg
== R_SS
) {
189 /* SS must be writable data */
190 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
191 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
193 if (dpl
!= cpl
|| dpl
!= rpl
) {
194 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
197 /* not readable code */
198 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
199 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
201 /* if data or non conforming code, checks the rights */
202 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
203 if (dpl
< cpl
|| dpl
< rpl
) {
204 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
208 if (!(e2
& DESC_P_MASK
)) {
209 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
211 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
212 get_seg_base(e1
, e2
),
213 get_seg_limit(e1
, e2
),
216 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
217 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
222 #define SWITCH_TSS_JMP 0
223 #define SWITCH_TSS_IRET 1
224 #define SWITCH_TSS_CALL 2
226 /* XXX: restore CPU state in registers (PowerPC case) */
227 static void switch_tss(CPUX86State
*env
, int tss_selector
,
228 uint32_t e1
, uint32_t e2
, int source
,
231 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
232 target_ulong tss_base
;
233 uint32_t new_regs
[8], new_segs
[6];
234 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
235 uint32_t old_eflags
, eflags_mask
;
240 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
241 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
244 /* if task gate, we read the TSS segment and we load it */
246 if (!(e2
& DESC_P_MASK
)) {
247 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
249 tss_selector
= e1
>> 16;
250 if (tss_selector
& 4) {
251 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
253 if (load_segment(env
, &e1
, &e2
, tss_selector
) != 0) {
254 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
256 if (e2
& DESC_S_MASK
) {
257 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
259 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
260 if ((type
& 7) != 1) {
261 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
265 if (!(e2
& DESC_P_MASK
)) {
266 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
274 tss_limit
= get_seg_limit(e1
, e2
);
275 tss_base
= get_seg_base(e1
, e2
);
276 if ((tss_selector
& 4) != 0 ||
277 tss_limit
< tss_limit_max
) {
278 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
280 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
282 old_tss_limit_max
= 103;
284 old_tss_limit_max
= 43;
287 /* read all the registers from the new TSS */
290 new_cr3
= cpu_ldl_kernel(env
, tss_base
+ 0x1c);
291 new_eip
= cpu_ldl_kernel(env
, tss_base
+ 0x20);
292 new_eflags
= cpu_ldl_kernel(env
, tss_base
+ 0x24);
293 for (i
= 0; i
< 8; i
++) {
294 new_regs
[i
] = cpu_ldl_kernel(env
, tss_base
+ (0x28 + i
* 4));
296 for (i
= 0; i
< 6; i
++) {
297 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x48 + i
* 4));
299 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x60);
300 new_trap
= cpu_ldl_kernel(env
, tss_base
+ 0x64);
304 new_eip
= cpu_lduw_kernel(env
, tss_base
+ 0x0e);
305 new_eflags
= cpu_lduw_kernel(env
, tss_base
+ 0x10);
306 for (i
= 0; i
< 8; i
++) {
307 new_regs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x12 + i
* 2)) |
310 for (i
= 0; i
< 4; i
++) {
311 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x22 + i
* 4));
313 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x2a);
318 /* XXX: avoid a compiler warning, see
319 http://support.amd.com/us/Processor_TechDocs/24593.pdf
320 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
323 /* NOTE: we must avoid memory exceptions during the task switch,
324 so we make dummy accesses before */
325 /* XXX: it can still fail in some cases, so a bigger hack is
326 necessary to valid the TLB after having done the accesses */
328 v1
= cpu_ldub_kernel(env
, env
->tr
.base
);
329 v2
= cpu_ldub_kernel(env
, env
->tr
.base
+ old_tss_limit_max
);
330 cpu_stb_kernel(env
, env
->tr
.base
, v1
);
331 cpu_stb_kernel(env
, env
->tr
.base
+ old_tss_limit_max
, v2
);
333 /* clear busy bit (it is restartable) */
334 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
338 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
339 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
340 e2
&= ~DESC_TSS_BUSY_MASK
;
341 cpu_stl_kernel(env
, ptr
+ 4, e2
);
343 old_eflags
= cpu_compute_eflags(env
);
344 if (source
== SWITCH_TSS_IRET
) {
345 old_eflags
&= ~NT_MASK
;
348 /* save the current state in the old TSS */
351 cpu_stl_kernel(env
, env
->tr
.base
+ 0x20, next_eip
);
352 cpu_stl_kernel(env
, env
->tr
.base
+ 0x24, old_eflags
);
353 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
]);
354 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
]);
355 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
]);
356 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
]);
357 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
]);
358 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
]);
359 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
]);
360 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
]);
361 for (i
= 0; i
< 6; i
++) {
362 cpu_stw_kernel(env
, env
->tr
.base
+ (0x48 + i
* 4),
363 env
->segs
[i
].selector
);
367 cpu_stw_kernel(env
, env
->tr
.base
+ 0x0e, next_eip
);
368 cpu_stw_kernel(env
, env
->tr
.base
+ 0x10, old_eflags
);
369 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
]);
370 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
]);
371 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
]);
372 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
]);
373 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
]);
374 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
]);
375 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
]);
376 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
]);
377 for (i
= 0; i
< 4; i
++) {
378 cpu_stw_kernel(env
, env
->tr
.base
+ (0x22 + i
* 4),
379 env
->segs
[i
].selector
);
383 /* now if an exception occurs, it will occurs in the next task
386 if (source
== SWITCH_TSS_CALL
) {
387 cpu_stw_kernel(env
, tss_base
, env
->tr
.selector
);
388 new_eflags
|= NT_MASK
;
392 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
396 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
397 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
398 e2
|= DESC_TSS_BUSY_MASK
;
399 cpu_stl_kernel(env
, ptr
+ 4, e2
);
402 /* set the new CPU state */
403 /* from this point, any exception which occurs can give problems */
404 env
->cr
[0] |= CR0_TS_MASK
;
405 env
->hflags
|= HF_TS_MASK
;
406 env
->tr
.selector
= tss_selector
;
407 env
->tr
.base
= tss_base
;
408 env
->tr
.limit
= tss_limit
;
409 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
411 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
412 cpu_x86_update_cr3(env
, new_cr3
);
415 /* load all registers without an exception, then reload them with
416 possible exception */
418 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
419 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
421 eflags_mask
&= 0xffff;
423 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
424 /* XXX: what to do in 16 bit case? */
425 env
->regs
[R_EAX
] = new_regs
[0];
426 env
->regs
[R_ECX
] = new_regs
[1];
427 env
->regs
[R_EDX
] = new_regs
[2];
428 env
->regs
[R_EBX
] = new_regs
[3];
429 env
->regs
[R_ESP
] = new_regs
[4];
430 env
->regs
[R_EBP
] = new_regs
[5];
431 env
->regs
[R_ESI
] = new_regs
[6];
432 env
->regs
[R_EDI
] = new_regs
[7];
433 if (new_eflags
& VM_MASK
) {
434 for (i
= 0; i
< 6; i
++) {
435 load_seg_vm(env
, i
, new_segs
[i
]);
438 /* first just selectors as the rest may trigger exceptions */
439 for (i
= 0; i
< 6; i
++) {
440 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
444 env
->ldt
.selector
= new_ldt
& ~4;
451 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
454 if ((new_ldt
& 0xfffc) != 0) {
456 index
= new_ldt
& ~7;
457 if ((index
+ 7) > dt
->limit
) {
458 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
460 ptr
= dt
->base
+ index
;
461 e1
= cpu_ldl_kernel(env
, ptr
);
462 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
463 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
464 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
466 if (!(e2
& DESC_P_MASK
)) {
467 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
469 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
472 /* load the segments */
473 if (!(new_eflags
& VM_MASK
)) {
474 int cpl
= new_segs
[R_CS
] & 3;
475 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
);
476 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
);
477 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
);
478 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
);
479 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
);
480 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
);
483 /* check that env->eip is in the CS segment limits */
484 if (new_eip
> env
->segs
[R_CS
].limit
) {
485 /* XXX: different exception if CALL? */
486 raise_exception_err(env
, EXCP0D_GPF
, 0);
489 #ifndef CONFIG_USER_ONLY
490 /* reset local breakpoints */
491 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
492 for (i
= 0; i
< DR7_MAX_BP
; i
++) {
493 if (hw_local_breakpoint_enabled(env
->dr
[7], i
) &&
494 !hw_global_breakpoint_enabled(env
->dr
[7], i
)) {
495 hw_breakpoint_remove(env
, i
);
498 env
->dr
[7] &= ~DR7_LOCAL_BP_MASK
;
503 static inline unsigned int get_sp_mask(unsigned int e2
)
505 if (e2
& DESC_B_MASK
) {
/* True for the x86 exception vectors that push an error code:
   #DF, #TS, #NP, #SS, #GP, #PF and #AC. */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
528 #define SET_ESP(val, sp_mask) \
530 if ((sp_mask) == 0xffff) { \
531 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
533 } else if ((sp_mask) == 0xffffffffLL) { \
534 env->regs[R_ESP] = (uint32_t)(val); \
536 env->regs[R_ESP] = (val); \
540 #define SET_ESP(val, sp_mask) \
542 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
543 ((val) & (sp_mask)); \
547 /* in 64-bit machines, this can overflow. So this segment addition macro
548 * can be used to trim the value to 32-bit whenever needed */
549 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
551 /* XXX: add a is_user flag to have proper security support */
552 #define PUSHW(ssp, sp, sp_mask, val) \
555 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
558 #define PUSHL(ssp, sp, sp_mask, val) \
561 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
564 #define POPW(ssp, sp, sp_mask, val) \
566 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
570 #define POPL(ssp, sp, sp_mask, val) \
572 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
576 /* protected mode interrupt */
577 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
578 int error_code
, unsigned int next_eip
,
582 target_ulong ptr
, ssp
;
583 int type
, dpl
, selector
, ss_dpl
, cpl
;
584 int has_error_code
, new_stack
, shift
;
585 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
586 uint32_t old_eip
, sp_mask
;
587 int vm86
= env
->eflags
& VM_MASK
;
590 if (!is_int
&& !is_hw
) {
591 has_error_code
= exception_has_error_code(intno
);
600 if (intno
* 8 + 7 > dt
->limit
) {
601 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
603 ptr
= dt
->base
+ intno
* 8;
604 e1
= cpu_ldl_kernel(env
, ptr
);
605 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
606 /* check gate type */
607 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
609 case 5: /* task gate */
610 /* must do that check here to return the correct error code */
611 if (!(e2
& DESC_P_MASK
)) {
612 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
614 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
615 if (has_error_code
) {
619 /* push the error code */
620 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
622 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
627 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
628 ssp
= env
->segs
[R_SS
].base
+ esp
;
630 cpu_stl_kernel(env
, ssp
, error_code
);
632 cpu_stw_kernel(env
, ssp
, error_code
);
637 case 6: /* 286 interrupt gate */
638 case 7: /* 286 trap gate */
639 case 14: /* 386 interrupt gate */
640 case 15: /* 386 trap gate */
643 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
646 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
647 cpl
= env
->hflags
& HF_CPL_MASK
;
648 /* check privilege if software int */
649 if (is_int
&& dpl
< cpl
) {
650 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
652 /* check valid bit */
653 if (!(e2
& DESC_P_MASK
)) {
654 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
657 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
658 if ((selector
& 0xfffc) == 0) {
659 raise_exception_err(env
, EXCP0D_GPF
, 0);
661 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
662 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
664 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
665 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
667 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
669 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
671 if (!(e2
& DESC_P_MASK
)) {
672 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
674 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
675 /* to inner privilege */
676 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
);
677 if ((ss
& 0xfffc) == 0) {
678 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
680 if ((ss
& 3) != dpl
) {
681 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
683 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
684 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
686 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
688 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
690 if (!(ss_e2
& DESC_S_MASK
) ||
691 (ss_e2
& DESC_CS_MASK
) ||
692 !(ss_e2
& DESC_W_MASK
)) {
693 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
695 if (!(ss_e2
& DESC_P_MASK
)) {
696 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
699 sp_mask
= get_sp_mask(ss_e2
);
700 ssp
= get_seg_base(ss_e1
, ss_e2
);
701 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
702 /* to same privilege */
704 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
707 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
708 ssp
= env
->segs
[R_SS
].base
;
709 esp
= env
->regs
[R_ESP
];
712 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
713 new_stack
= 0; /* avoid warning */
714 sp_mask
= 0; /* avoid warning */
715 ssp
= 0; /* avoid warning */
716 esp
= 0; /* avoid warning */
722 /* XXX: check that enough room is available */
723 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
732 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
733 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
734 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
735 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
737 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
738 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
740 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
741 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
742 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
743 if (has_error_code
) {
744 PUSHL(ssp
, esp
, sp_mask
, error_code
);
749 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
750 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
751 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
752 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
754 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
755 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
757 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
758 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
759 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
760 if (has_error_code
) {
761 PUSHW(ssp
, esp
, sp_mask
, error_code
);
765 /* interrupt gate clear IF mask */
766 if ((type
& 1) == 0) {
767 env
->eflags
&= ~IF_MASK
;
769 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
773 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
774 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
775 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
776 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
778 ss
= (ss
& ~3) | dpl
;
779 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
780 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
782 SET_ESP(esp
, sp_mask
);
784 selector
= (selector
& ~3) | dpl
;
785 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
786 get_seg_base(e1
, e2
),
787 get_seg_limit(e1
, e2
),
794 #define PUSHQ(sp, val) \
797 cpu_stq_kernel(env, sp, (val)); \
800 #define POPQ(sp, val) \
802 val = cpu_ldq_kernel(env, sp); \
806 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
808 X86CPU
*cpu
= x86_env_get_cpu(env
);
812 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
813 env
->tr
.base
, env
->tr
.limit
);
816 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
817 cpu_abort(CPU(cpu
), "invalid tss");
819 index
= 8 * level
+ 4;
820 if ((index
+ 7) > env
->tr
.limit
) {
821 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
823 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
826 /* 64 bit interrupt */
827 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
828 int error_code
, target_ulong next_eip
, int is_hw
)
832 int type
, dpl
, selector
, cpl
, ist
;
833 int has_error_code
, new_stack
;
834 uint32_t e1
, e2
, e3
, ss
;
835 target_ulong old_eip
, esp
, offset
;
838 if (!is_int
&& !is_hw
) {
839 has_error_code
= exception_has_error_code(intno
);
848 if (intno
* 16 + 15 > dt
->limit
) {
849 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
851 ptr
= dt
->base
+ intno
* 16;
852 e1
= cpu_ldl_kernel(env
, ptr
);
853 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
854 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
855 /* check gate type */
856 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
858 case 14: /* 386 interrupt gate */
859 case 15: /* 386 trap gate */
862 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
865 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
866 cpl
= env
->hflags
& HF_CPL_MASK
;
867 /* check privilege if software int */
868 if (is_int
&& dpl
< cpl
) {
869 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
871 /* check valid bit */
872 if (!(e2
& DESC_P_MASK
)) {
873 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
876 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
878 if ((selector
& 0xfffc) == 0) {
879 raise_exception_err(env
, EXCP0D_GPF
, 0);
882 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
883 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
885 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
886 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
888 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
890 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
892 if (!(e2
& DESC_P_MASK
)) {
893 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
895 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
896 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
898 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
899 /* to inner privilege */
901 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
903 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
904 /* to same privilege */
905 if (env
->eflags
& VM_MASK
) {
906 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
909 esp
= env
->regs
[R_ESP
];
912 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
913 new_stack
= 0; /* avoid warning */
914 esp
= 0; /* avoid warning */
916 esp
&= ~0xfLL
; /* align stack */
918 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
919 PUSHQ(esp
, env
->regs
[R_ESP
]);
920 PUSHQ(esp
, cpu_compute_eflags(env
));
921 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
923 if (has_error_code
) {
924 PUSHQ(esp
, error_code
);
927 /* interrupt gate clear IF mask */
928 if ((type
& 1) == 0) {
929 env
->eflags
&= ~IF_MASK
;
931 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
935 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
937 env
->regs
[R_ESP
] = esp
;
939 selector
= (selector
& ~3) | dpl
;
940 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
941 get_seg_base(e1
, e2
),
942 get_seg_limit(e1
, e2
),
949 #if defined(CONFIG_USER_ONLY)
950 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
952 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
954 cs
->exception_index
= EXCP_SYSCALL
;
955 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
959 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
963 if (!(env
->efer
& MSR_EFER_SCE
)) {
964 raise_exception_err(env
, EXCP06_ILLOP
, 0);
966 selector
= (env
->star
>> 32) & 0xffff;
967 if (env
->hflags
& HF_LMA_MASK
) {
970 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
971 env
->regs
[11] = cpu_compute_eflags(env
);
973 code64
= env
->hflags
& HF_CS64_MASK
;
975 env
->eflags
&= ~env
->fmask
;
976 cpu_load_eflags(env
, env
->eflags
, 0);
977 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
979 DESC_G_MASK
| DESC_P_MASK
|
981 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
983 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
985 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
987 DESC_W_MASK
| DESC_A_MASK
);
989 env
->eip
= env
->lstar
;
991 env
->eip
= env
->cstar
;
994 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
996 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
997 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
999 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1001 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1002 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1004 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1006 DESC_W_MASK
| DESC_A_MASK
);
1007 env
->eip
= (uint32_t)env
->star
;
1013 #ifdef TARGET_X86_64
1014 void helper_sysret(CPUX86State
*env
, int dflag
)
1018 if (!(env
->efer
& MSR_EFER_SCE
)) {
1019 raise_exception_err(env
, EXCP06_ILLOP
, 0);
1021 cpl
= env
->hflags
& HF_CPL_MASK
;
1022 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1023 raise_exception_err(env
, EXCP0D_GPF
, 0);
1025 selector
= (env
->star
>> 48) & 0xffff;
1026 if (env
->hflags
& HF_LMA_MASK
) {
1027 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1028 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1031 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1033 DESC_G_MASK
| DESC_P_MASK
|
1034 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1035 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1037 env
->eip
= env
->regs
[R_ECX
];
1039 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1041 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1042 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1043 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1044 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1046 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1048 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1049 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1050 DESC_W_MASK
| DESC_A_MASK
);
1052 env
->eflags
|= IF_MASK
;
1053 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1055 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1056 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1057 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1058 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1059 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1061 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1062 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1063 DESC_W_MASK
| DESC_A_MASK
);
1068 /* real mode interrupt */
1069 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1070 int error_code
, unsigned int next_eip
)
1073 target_ulong ptr
, ssp
;
1075 uint32_t offset
, esp
;
1076 uint32_t old_cs
, old_eip
;
1078 /* real mode (simpler!) */
1080 if (intno
* 4 + 3 > dt
->limit
) {
1081 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1083 ptr
= dt
->base
+ intno
* 4;
1084 offset
= cpu_lduw_kernel(env
, ptr
);
1085 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1086 esp
= env
->regs
[R_ESP
];
1087 ssp
= env
->segs
[R_SS
].base
;
1093 old_cs
= env
->segs
[R_CS
].selector
;
1094 /* XXX: use SS segment size? */
1095 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1096 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1097 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1099 /* update processor state */
1100 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1102 env
->segs
[R_CS
].selector
= selector
;
1103 env
->segs
[R_CS
].base
= (selector
<< 4);
1104 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1107 #if defined(CONFIG_USER_ONLY)
1108 /* fake user mode interrupt */
1109 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1110 int error_code
, target_ulong next_eip
)
1114 int dpl
, cpl
, shift
;
1118 if (env
->hflags
& HF_LMA_MASK
) {
1123 ptr
= dt
->base
+ (intno
<< shift
);
1124 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1126 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1127 cpl
= env
->hflags
& HF_CPL_MASK
;
1128 /* check privilege if software int */
1129 if (is_int
&& dpl
< cpl
) {
1130 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1133 /* Since we emulate only user space, we cannot do more than
1134 exiting the emulation with the suitable exception and error
1135 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1136 if (is_int
|| intno
== EXCP_SYSCALL
) {
1137 env
->eip
= next_eip
;
1143 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1144 int error_code
, int is_hw
, int rm
)
1146 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1147 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1148 control
.event_inj
));
1150 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1154 type
= SVM_EVTINJ_TYPE_SOFT
;
1156 type
= SVM_EVTINJ_TYPE_EXEPT
;
1158 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1159 if (!rm
&& exception_has_error_code(intno
)) {
1160 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1161 stl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1162 control
.event_inj_err
),
1166 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1173 * Begin execution of an interruption. is_int is TRUE if coming from
1174 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1175 * instruction. It is only relevant if is_int is TRUE.
1177 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1178 int error_code
, target_ulong next_eip
, int is_hw
)
1180 CPUX86State
*env
= &cpu
->env
;
1182 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1183 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1186 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1187 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1188 count
, intno
, error_code
, is_int
,
1189 env
->hflags
& HF_CPL_MASK
,
1190 env
->segs
[R_CS
].selector
, env
->eip
,
1191 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1192 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1193 if (intno
== 0x0e) {
1194 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1196 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1199 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1206 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1207 for (i
= 0; i
< 16; i
++) {
1208 qemu_log(" %02x", ldub(ptr
+ i
));
1216 if (env
->cr
[0] & CR0_PE_MASK
) {
1217 #if !defined(CONFIG_USER_ONLY)
1218 if (env
->hflags
& HF_SVMI_MASK
) {
1219 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1222 #ifdef TARGET_X86_64
1223 if (env
->hflags
& HF_LMA_MASK
) {
1224 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1228 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1232 #if !defined(CONFIG_USER_ONLY)
1233 if (env
->hflags
& HF_SVMI_MASK
) {
1234 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1237 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1240 #if !defined(CONFIG_USER_ONLY)
1241 if (env
->hflags
& HF_SVMI_MASK
) {
1242 CPUState
*cs
= CPU(cpu
);
1243 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+
1244 offsetof(struct vmcb
,
1245 control
.event_inj
));
1248 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1249 event_inj
& ~SVM_EVTINJ_VALID
);
1254 void x86_cpu_do_interrupt(CPUState
*cs
)
1256 X86CPU
*cpu
= X86_CPU(cs
);
1257 CPUX86State
*env
= &cpu
->env
;
1259 #if defined(CONFIG_USER_ONLY)
1260 /* if user mode only, we simulate a fake exception
1261 which will be handled outside the cpu execution
1263 do_interrupt_user(env
, cs
->exception_index
,
1264 env
->exception_is_int
,
1266 env
->exception_next_eip
);
1267 /* successfully delivered */
1268 env
->old_exception
= -1;
1270 /* simulate a real cpu exception. On i386, it can
1271 trigger new exceptions, but we do not handle
1272 double or triple faults yet. */
1273 do_interrupt_all(cpu
, cs
->exception_index
,
1274 env
->exception_is_int
,
1276 env
->exception_next_eip
, 0);
1277 /* successfully delivered */
1278 env
->old_exception
= -1;
1282 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1284 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1287 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1289 X86CPU
*cpu
= X86_CPU(cs
);
1290 CPUX86State
*env
= &cpu
->env
;
1293 #if !defined(CONFIG_USER_ONLY)
1294 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1295 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1296 apic_poll_irq(cpu
->apic_state
);
1299 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1301 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1302 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1303 !(env
->hflags
& HF_SMM_MASK
)) {
1304 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1305 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1308 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1309 !(env
->hflags2
& HF2_NMI_MASK
)) {
1310 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1311 env
->hflags2
|= HF2_NMI_MASK
;
1312 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1314 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1315 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1316 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1318 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1319 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1320 (env
->hflags2
& HF2_HIF_MASK
)) ||
1321 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1322 (env
->eflags
& IF_MASK
&&
1323 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1325 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1326 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1327 CPU_INTERRUPT_VIRQ
);
1328 intno
= cpu_get_pic_interrupt(env
);
1329 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1330 "Servicing hardware INT=0x%02x\n", intno
);
1331 do_interrupt_x86_hardirq(env
, intno
, 1);
1332 /* ensure that no TB jump will be modified as
1333 the program flow was changed */
1335 #if !defined(CONFIG_USER_ONLY)
1336 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1337 (env
->eflags
& IF_MASK
) &&
1338 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1340 /* FIXME: this should respect TPR */
1341 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1342 intno
= ldl_phys(cs
->as
, env
->vm_vmcb
1343 + offsetof(struct vmcb
, control
.int_vector
));
1344 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1345 "Servicing virtual hardware INT=0x%02x\n", intno
);
1346 do_interrupt_x86_hardirq(env
, intno
, 1);
1347 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1356 void helper_enter_level(CPUX86State
*env
, int level
, int data32
,
1360 uint32_t esp_mask
, esp
, ebp
;
1362 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1363 ssp
= env
->segs
[R_SS
].base
;
1364 ebp
= env
->regs
[R_EBP
];
1365 esp
= env
->regs
[R_ESP
];
1372 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
),
1373 cpu_ldl_data(env
, ssp
+ (ebp
& esp_mask
)));
1376 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
), t1
);
1383 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
),
1384 cpu_lduw_data(env
, ssp
+ (ebp
& esp_mask
)));
1387 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
), t1
);
1391 #ifdef TARGET_X86_64
1392 void helper_enter64_level(CPUX86State
*env
, int level
, int data64
,
1395 target_ulong esp
, ebp
;
1397 ebp
= env
->regs
[R_EBP
];
1398 esp
= env
->regs
[R_ESP
];
1406 cpu_stq_data(env
, esp
, cpu_ldq_data(env
, ebp
));
1409 cpu_stq_data(env
, esp
, t1
);
1416 cpu_stw_data(env
, esp
, cpu_lduw_data(env
, ebp
));
1419 cpu_stw_data(env
, esp
, t1
);
1424 void helper_lldt(CPUX86State
*env
, int selector
)
1428 int index
, entry_limit
;
1432 if ((selector
& 0xfffc) == 0) {
1433 /* XXX: NULL selector case: invalid LDT */
1437 if (selector
& 0x4) {
1438 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1441 index
= selector
& ~7;
1442 #ifdef TARGET_X86_64
1443 if (env
->hflags
& HF_LMA_MASK
) {
1450 if ((index
+ entry_limit
) > dt
->limit
) {
1451 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1453 ptr
= dt
->base
+ index
;
1454 e1
= cpu_ldl_kernel(env
, ptr
);
1455 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1456 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1457 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1459 if (!(e2
& DESC_P_MASK
)) {
1460 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1462 #ifdef TARGET_X86_64
1463 if (env
->hflags
& HF_LMA_MASK
) {
1466 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1467 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1468 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1472 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1475 env
->ldt
.selector
= selector
;
1478 void helper_ltr(CPUX86State
*env
, int selector
)
1482 int index
, type
, entry_limit
;
1486 if ((selector
& 0xfffc) == 0) {
1487 /* NULL selector case: invalid TR */
1492 if (selector
& 0x4) {
1493 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1496 index
= selector
& ~7;
1497 #ifdef TARGET_X86_64
1498 if (env
->hflags
& HF_LMA_MASK
) {
1505 if ((index
+ entry_limit
) > dt
->limit
) {
1506 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1508 ptr
= dt
->base
+ index
;
1509 e1
= cpu_ldl_kernel(env
, ptr
);
1510 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1511 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1512 if ((e2
& DESC_S_MASK
) ||
1513 (type
!= 1 && type
!= 9)) {
1514 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1516 if (!(e2
& DESC_P_MASK
)) {
1517 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1519 #ifdef TARGET_X86_64
1520 if (env
->hflags
& HF_LMA_MASK
) {
1523 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1524 e4
= cpu_ldl_kernel(env
, ptr
+ 12);
1525 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1526 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1528 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1529 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1533 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1535 e2
|= DESC_TSS_BUSY_MASK
;
1536 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1538 env
->tr
.selector
= selector
;
1541 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1542 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1551 cpl
= env
->hflags
& HF_CPL_MASK
;
1552 if ((selector
& 0xfffc) == 0) {
1553 /* null selector case */
1555 #ifdef TARGET_X86_64
1556 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1559 raise_exception_err(env
, EXCP0D_GPF
, 0);
1561 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1564 if (selector
& 0x4) {
1569 index
= selector
& ~7;
1570 if ((index
+ 7) > dt
->limit
) {
1571 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1573 ptr
= dt
->base
+ index
;
1574 e1
= cpu_ldl_kernel(env
, ptr
);
1575 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1577 if (!(e2
& DESC_S_MASK
)) {
1578 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1581 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1582 if (seg_reg
== R_SS
) {
1583 /* must be writable segment */
1584 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1585 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1587 if (rpl
!= cpl
|| dpl
!= cpl
) {
1588 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1591 /* must be readable segment */
1592 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1593 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1596 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1597 /* if not conforming code, test rights */
1598 if (dpl
< cpl
|| dpl
< rpl
) {
1599 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1604 if (!(e2
& DESC_P_MASK
)) {
1605 if (seg_reg
== R_SS
) {
1606 raise_exception_err(env
, EXCP0C_STACK
, selector
& 0xfffc);
1608 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1612 /* set the access bit if not already set */
1613 if (!(e2
& DESC_A_MASK
)) {
1615 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1618 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1619 get_seg_base(e1
, e2
),
1620 get_seg_limit(e1
, e2
),
1623 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1624 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1629 /* protected mode jump */
1630 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1631 int next_eip_addend
)
1634 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1635 target_ulong next_eip
;
1637 if ((new_cs
& 0xfffc) == 0) {
1638 raise_exception_err(env
, EXCP0D_GPF
, 0);
1640 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1641 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1643 cpl
= env
->hflags
& HF_CPL_MASK
;
1644 if (e2
& DESC_S_MASK
) {
1645 if (!(e2
& DESC_CS_MASK
)) {
1646 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1648 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1649 if (e2
& DESC_C_MASK
) {
1650 /* conforming code segment */
1652 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1655 /* non conforming code segment */
1658 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1661 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1664 if (!(e2
& DESC_P_MASK
)) {
1665 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1667 limit
= get_seg_limit(e1
, e2
);
1668 if (new_eip
> limit
&&
1669 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1670 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1672 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1673 get_seg_base(e1
, e2
), limit
, e2
);
1676 /* jump to call or task gate */
1677 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1679 cpl
= env
->hflags
& HF_CPL_MASK
;
1680 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1682 case 1: /* 286 TSS */
1683 case 9: /* 386 TSS */
1684 case 5: /* task gate */
1685 if (dpl
< cpl
|| dpl
< rpl
) {
1686 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1688 next_eip
= env
->eip
+ next_eip_addend
;
1689 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
1691 case 4: /* 286 call gate */
1692 case 12: /* 386 call gate */
1693 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1694 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1696 if (!(e2
& DESC_P_MASK
)) {
1697 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1700 new_eip
= (e1
& 0xffff);
1702 new_eip
|= (e2
& 0xffff0000);
1704 if (load_segment(env
, &e1
, &e2
, gate_cs
) != 0) {
1705 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1707 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1708 /* must be code segment */
1709 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1710 (DESC_S_MASK
| DESC_CS_MASK
))) {
1711 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1713 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1714 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1715 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1717 if (!(e2
& DESC_P_MASK
)) {
1718 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1720 limit
= get_seg_limit(e1
, e2
);
1721 if (new_eip
> limit
) {
1722 raise_exception_err(env
, EXCP0D_GPF
, 0);
1724 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1725 get_seg_base(e1
, e2
), limit
, e2
);
1729 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1735 /* real mode call */
1736 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1737 int shift
, int next_eip
)
1740 uint32_t esp
, esp_mask
;
1744 esp
= env
->regs
[R_ESP
];
1745 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1746 ssp
= env
->segs
[R_SS
].base
;
1748 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1749 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
1751 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1752 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
1755 SET_ESP(esp
, esp_mask
);
1757 env
->segs
[R_CS
].selector
= new_cs
;
1758 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1761 /* protected mode call */
1762 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1763 int shift
, int next_eip_addend
)
1766 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1767 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1768 uint32_t val
, limit
, old_sp_mask
;
1769 target_ulong ssp
, old_ssp
, next_eip
;
1771 next_eip
= env
->eip
+ next_eip_addend
;
1772 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1773 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1774 if ((new_cs
& 0xfffc) == 0) {
1775 raise_exception_err(env
, EXCP0D_GPF
, 0);
1777 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1778 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1780 cpl
= env
->hflags
& HF_CPL_MASK
;
1781 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1782 if (e2
& DESC_S_MASK
) {
1783 if (!(e2
& DESC_CS_MASK
)) {
1784 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1786 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1787 if (e2
& DESC_C_MASK
) {
1788 /* conforming code segment */
1790 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1793 /* non conforming code segment */
1796 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1799 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1802 if (!(e2
& DESC_P_MASK
)) {
1803 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1806 #ifdef TARGET_X86_64
1807 /* XXX: check 16/32 bit cases in long mode */
1812 rsp
= env
->regs
[R_ESP
];
1813 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
1814 PUSHQ(rsp
, next_eip
);
1815 /* from this point, not restartable */
1816 env
->regs
[R_ESP
] = rsp
;
1817 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1818 get_seg_base(e1
, e2
),
1819 get_seg_limit(e1
, e2
), e2
);
1824 sp
= env
->regs
[R_ESP
];
1825 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1826 ssp
= env
->segs
[R_SS
].base
;
1828 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1829 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1831 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1832 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1835 limit
= get_seg_limit(e1
, e2
);
1836 if (new_eip
> limit
) {
1837 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1839 /* from this point, not restartable */
1840 SET_ESP(sp
, sp_mask
);
1841 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1842 get_seg_base(e1
, e2
), limit
, e2
);
1846 /* check gate type */
1847 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1848 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1851 case 1: /* available 286 TSS */
1852 case 9: /* available 386 TSS */
1853 case 5: /* task gate */
1854 if (dpl
< cpl
|| dpl
< rpl
) {
1855 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1857 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
1859 case 4: /* 286 call gate */
1860 case 12: /* 386 call gate */
1863 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1868 if (dpl
< cpl
|| dpl
< rpl
) {
1869 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1871 /* check valid bit */
1872 if (!(e2
& DESC_P_MASK
)) {
1873 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1875 selector
= e1
>> 16;
1876 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1877 param_count
= e2
& 0x1f;
1878 if ((selector
& 0xfffc) == 0) {
1879 raise_exception_err(env
, EXCP0D_GPF
, 0);
1882 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
1883 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1885 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1886 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1888 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1890 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1892 if (!(e2
& DESC_P_MASK
)) {
1893 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1896 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1897 /* to inner privilege */
1898 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
);
1899 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1900 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1902 if ((ss
& 0xfffc) == 0) {
1903 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1905 if ((ss
& 3) != dpl
) {
1906 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1908 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
1909 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1911 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1912 if (ss_dpl
!= dpl
) {
1913 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1915 if (!(ss_e2
& DESC_S_MASK
) ||
1916 (ss_e2
& DESC_CS_MASK
) ||
1917 !(ss_e2
& DESC_W_MASK
)) {
1918 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1920 if (!(ss_e2
& DESC_P_MASK
)) {
1921 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1924 /* push_size = ((param_count * 2) + 8) << shift; */
1926 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1927 old_ssp
= env
->segs
[R_SS
].base
;
1929 sp_mask
= get_sp_mask(ss_e2
);
1930 ssp
= get_seg_base(ss_e1
, ss_e2
);
1932 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1933 PUSHL(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1934 for (i
= param_count
- 1; i
>= 0; i
--) {
1935 val
= cpu_ldl_kernel(env
, old_ssp
+
1936 ((env
->regs
[R_ESP
] + i
* 4) &
1938 PUSHL(ssp
, sp
, sp_mask
, val
);
1941 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1942 PUSHW(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1943 for (i
= param_count
- 1; i
>= 0; i
--) {
1944 val
= cpu_lduw_kernel(env
, old_ssp
+
1945 ((env
->regs
[R_ESP
] + i
* 2) &
1947 PUSHW(ssp
, sp
, sp_mask
, val
);
1952 /* to same privilege */
1953 sp
= env
->regs
[R_ESP
];
1954 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1955 ssp
= env
->segs
[R_SS
].base
;
1956 /* push_size = (4 << shift); */
1961 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1962 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1964 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1965 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1968 /* from this point, not restartable */
1971 ss
= (ss
& ~3) | dpl
;
1972 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1974 get_seg_limit(ss_e1
, ss_e2
),
1978 selector
= (selector
& ~3) | dpl
;
1979 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1980 get_seg_base(e1
, e2
),
1981 get_seg_limit(e1
, e2
),
1983 SET_ESP(sp
, sp_mask
);
1988 /* real and vm86 mode iret */
1989 void helper_iret_real(CPUX86State
*env
, int shift
)
1991 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
1995 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1996 sp
= env
->regs
[R_ESP
];
1997 ssp
= env
->segs
[R_SS
].base
;
2000 POPL(ssp
, sp
, sp_mask
, new_eip
);
2001 POPL(ssp
, sp
, sp_mask
, new_cs
);
2003 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2006 POPW(ssp
, sp
, sp_mask
, new_eip
);
2007 POPW(ssp
, sp
, sp_mask
, new_cs
);
2008 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2010 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
2011 env
->segs
[R_CS
].selector
= new_cs
;
2012 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2014 if (env
->eflags
& VM_MASK
) {
2015 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
2018 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
2022 eflags_mask
&= 0xffff;
2024 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2025 env
->hflags2
&= ~HF2_NMI_MASK
;
2028 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
2033 /* XXX: on x86_64, we do not want to nullify FS and GS because
2034 they may still contain a valid base. I would be interested to
2035 know how a real x86_64 CPU behaves */
2036 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2037 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2041 e2
= env
->segs
[seg_reg
].flags
;
2042 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2043 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2044 /* data or non conforming code segment */
2046 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2051 /* protected mode iret */
2052 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2053 int is_iret
, int addend
)
2055 uint32_t new_cs
, new_eflags
, new_ss
;
2056 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2057 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2058 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2059 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2061 #ifdef TARGET_X86_64
2067 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2069 sp
= env
->regs
[R_ESP
];
2070 ssp
= env
->segs
[R_SS
].base
;
2071 new_eflags
= 0; /* avoid warning */
2072 #ifdef TARGET_X86_64
2078 POPQ(sp
, new_eflags
);
2085 POPL(ssp
, sp
, sp_mask
, new_eip
);
2086 POPL(ssp
, sp
, sp_mask
, new_cs
);
2089 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2090 if (new_eflags
& VM_MASK
) {
2091 goto return_to_vm86
;
2096 POPW(ssp
, sp
, sp_mask
, new_eip
);
2097 POPW(ssp
, sp
, sp_mask
, new_cs
);
2099 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2103 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2104 new_cs
, new_eip
, shift
, addend
);
2105 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2106 if ((new_cs
& 0xfffc) == 0) {
2107 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2109 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
2110 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2112 if (!(e2
& DESC_S_MASK
) ||
2113 !(e2
& DESC_CS_MASK
)) {
2114 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2116 cpl
= env
->hflags
& HF_CPL_MASK
;
2119 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2121 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2122 if (e2
& DESC_C_MASK
) {
2124 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2128 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2131 if (!(e2
& DESC_P_MASK
)) {
2132 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
2136 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2137 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2138 /* return to same privilege level */
2139 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2140 get_seg_base(e1
, e2
),
2141 get_seg_limit(e1
, e2
),
2144 /* return to different privilege level */
2145 #ifdef TARGET_X86_64
2155 POPL(ssp
, sp
, sp_mask
, new_esp
);
2156 POPL(ssp
, sp
, sp_mask
, new_ss
);
2160 POPW(ssp
, sp
, sp_mask
, new_esp
);
2161 POPW(ssp
, sp
, sp_mask
, new_ss
);
2164 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2166 if ((new_ss
& 0xfffc) == 0) {
2167 #ifdef TARGET_X86_64
2168 /* NULL ss is allowed in long mode if cpl != 3 */
2169 /* XXX: test CS64? */
2170 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2171 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2173 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2174 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2175 DESC_W_MASK
| DESC_A_MASK
);
2176 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2180 raise_exception_err(env
, EXCP0D_GPF
, 0);
2183 if ((new_ss
& 3) != rpl
) {
2184 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2186 if (load_segment(env
, &ss_e1
, &ss_e2
, new_ss
) != 0) {
2187 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2189 if (!(ss_e2
& DESC_S_MASK
) ||
2190 (ss_e2
& DESC_CS_MASK
) ||
2191 !(ss_e2
& DESC_W_MASK
)) {
2192 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2194 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2196 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2198 if (!(ss_e2
& DESC_P_MASK
)) {
2199 raise_exception_err(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc);
2201 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2202 get_seg_base(ss_e1
, ss_e2
),
2203 get_seg_limit(ss_e1
, ss_e2
),
2207 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2208 get_seg_base(e1
, e2
),
2209 get_seg_limit(e1
, e2
),
2212 #ifdef TARGET_X86_64
2213 if (env
->hflags
& HF_CS64_MASK
) {
2218 sp_mask
= get_sp_mask(ss_e2
);
2221 /* validate data segments */
2222 validate_seg(env
, R_ES
, rpl
);
2223 validate_seg(env
, R_DS
, rpl
);
2224 validate_seg(env
, R_FS
, rpl
);
2225 validate_seg(env
, R_GS
, rpl
);
2229 SET_ESP(sp
, sp_mask
);
2232 /* NOTE: 'cpl' is the _old_ CPL */
2233 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2235 eflags_mask
|= IOPL_MASK
;
2237 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2239 eflags_mask
|= IF_MASK
;
2242 eflags_mask
&= 0xffff;
2244 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2249 POPL(ssp
, sp
, sp_mask
, new_esp
);
2250 POPL(ssp
, sp
, sp_mask
, new_ss
);
2251 POPL(ssp
, sp
, sp_mask
, new_es
);
2252 POPL(ssp
, sp
, sp_mask
, new_ds
);
2253 POPL(ssp
, sp
, sp_mask
, new_fs
);
2254 POPL(ssp
, sp
, sp_mask
, new_gs
);
2256 /* modify processor state */
2257 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2258 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2260 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2261 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2262 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2263 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2264 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2265 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2267 env
->eip
= new_eip
& 0xffff;
2268 env
->regs
[R_ESP
] = new_esp
;
2271 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2273 int tss_selector
, type
;
2276 /* specific case for TSS */
2277 if (env
->eflags
& NT_MASK
) {
2278 #ifdef TARGET_X86_64
2279 if (env
->hflags
& HF_LMA_MASK
) {
2280 raise_exception_err(env
, EXCP0D_GPF
, 0);
2283 tss_selector
= cpu_lduw_kernel(env
, env
->tr
.base
+ 0);
2284 if (tss_selector
& 4) {
2285 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2287 if (load_segment(env
, &e1
, &e2
, tss_selector
) != 0) {
2288 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2290 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2291 /* NOTE: we check both segment and busy TSS */
2293 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
2295 switch_tss(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2297 helper_ret_protected(env
, shift
, 1, 0);
2299 env
->hflags2
&= ~HF2_NMI_MASK
;
2302 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2304 helper_ret_protected(env
, shift
, 0, addend
);
2307 void helper_sysenter(CPUX86State
*env
)
2309 if (env
->sysenter_cs
== 0) {
2310 raise_exception_err(env
, EXCP0D_GPF
, 0);
2312 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2314 #ifdef TARGET_X86_64
2315 if (env
->hflags
& HF_LMA_MASK
) {
2316 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2318 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2320 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2325 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2327 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2329 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2331 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2333 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2335 DESC_W_MASK
| DESC_A_MASK
);
2336 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2337 env
->eip
= env
->sysenter_eip
;
2340 void helper_sysexit(CPUX86State
*env
, int dflag
)
2344 cpl
= env
->hflags
& HF_CPL_MASK
;
2345 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2346 raise_exception_err(env
, EXCP0D_GPF
, 0);
2348 #ifdef TARGET_X86_64
2350 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2352 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2353 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2354 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2356 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2358 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2359 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2360 DESC_W_MASK
| DESC_A_MASK
);
2364 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2366 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2367 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2368 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2369 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2371 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2372 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2373 DESC_W_MASK
| DESC_A_MASK
);
2375 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2376 env
->eip
= env
->regs
[R_EDX
];
2379 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2382 uint32_t e1
, e2
, eflags
, selector
;
2383 int rpl
, dpl
, cpl
, type
;
2385 selector
= selector1
& 0xffff;
2386 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2387 if ((selector
& 0xfffc) == 0) {
2390 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2394 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2395 cpl
= env
->hflags
& HF_CPL_MASK
;
2396 if (e2
& DESC_S_MASK
) {
2397 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2400 if (dpl
< cpl
|| dpl
< rpl
) {
2405 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2416 if (dpl
< cpl
|| dpl
< rpl
) {
2418 CC_SRC
= eflags
& ~CC_Z
;
2422 limit
= get_seg_limit(e1
, e2
);
2423 CC_SRC
= eflags
| CC_Z
;
2427 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2429 uint32_t e1
, e2
, eflags
, selector
;
2430 int rpl
, dpl
, cpl
, type
;
2432 selector
= selector1
& 0xffff;
2433 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2434 if ((selector
& 0xfffc) == 0) {
2437 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2441 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2442 cpl
= env
->hflags
& HF_CPL_MASK
;
2443 if (e2
& DESC_S_MASK
) {
2444 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2447 if (dpl
< cpl
|| dpl
< rpl
) {
2452 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2466 if (dpl
< cpl
|| dpl
< rpl
) {
2468 CC_SRC
= eflags
& ~CC_Z
;
2472 CC_SRC
= eflags
| CC_Z
;
2473 return e2
& 0x00f0ff00;
2476 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2478 uint32_t e1
, e2
, eflags
, selector
;
2481 selector
= selector1
& 0xffff;
2482 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2483 if ((selector
& 0xfffc) == 0) {
2486 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2489 if (!(e2
& DESC_S_MASK
)) {
2493 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2494 cpl
= env
->hflags
& HF_CPL_MASK
;
2495 if (e2
& DESC_CS_MASK
) {
2496 if (!(e2
& DESC_R_MASK
)) {
2499 if (!(e2
& DESC_C_MASK
)) {
2500 if (dpl
< cpl
|| dpl
< rpl
) {
2505 if (dpl
< cpl
|| dpl
< rpl
) {
2507 CC_SRC
= eflags
& ~CC_Z
;
2511 CC_SRC
= eflags
| CC_Z
;
2514 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2516 uint32_t e1
, e2
, eflags
, selector
;
2519 selector
= selector1
& 0xffff;
2520 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2521 if ((selector
& 0xfffc) == 0) {
2524 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
2527 if (!(e2
& DESC_S_MASK
)) {
2531 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2532 cpl
= env
->hflags
& HF_CPL_MASK
;
2533 if (e2
& DESC_CS_MASK
) {
2536 if (dpl
< cpl
|| dpl
< rpl
) {
2539 if (!(e2
& DESC_W_MASK
)) {
2541 CC_SRC
= eflags
& ~CC_Z
;
2545 CC_SRC
= eflags
| CC_Z
;
2548 #if defined(CONFIG_USER_ONLY)
2549 void cpu_x86_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
2551 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
2552 int dpl
= (env
->eflags
& VM_MASK
) ? 3 : 0;
2554 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2555 (selector
<< 4), 0xffff,
2556 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
2557 DESC_A_MASK
| (dpl
<< DESC_DPL_SHIFT
));
2559 helper_load_seg(env
, seg_reg
, selector
);
2564 /* check if Port I/O is allowed in TSS */
2565 static inline void check_io(CPUX86State
*env
, int addr
, int size
)
2567 int io_offset
, val
, mask
;
2569 /* TSS must be a valid 32 bit one */
2570 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
2571 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
2572 env
->tr
.limit
< 103) {
2575 io_offset
= cpu_lduw_kernel(env
, env
->tr
.base
+ 0x66);
2576 io_offset
+= (addr
>> 3);
2577 /* Note: the check needs two bytes */
2578 if ((io_offset
+ 1) > env
->tr
.limit
) {
2581 val
= cpu_lduw_kernel(env
, env
->tr
.base
+ io_offset
);
2583 mask
= (1 << size
) - 1;
2584 /* all bits must be zero to allow the I/O */
2585 if ((val
& mask
) != 0) {
2587 raise_exception_err(env
, EXCP0D_GPF
, 0);
2591 void helper_check_iob(CPUX86State
*env
, uint32_t t0
)
2593 check_io(env
, t0
, 1);
2596 void helper_check_iow(CPUX86State
*env
, uint32_t t0
)
2598 check_io(env
, t0
, 2);
2601 void helper_check_iol(CPUX86State
*env
, uint32_t t0
)
2603 check_io(env
, t0
, 4);