2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifndef CONFIG_USER_ONLY
38 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
39 #define MEMSUFFIX _kernel
41 #include "exec/cpu_ldst_template.h"
44 #include "exec/cpu_ldst_template.h"
47 #include "exec/cpu_ldst_template.h"
50 #include "exec/cpu_ldst_template.h"
55 /* return non zero if error */
56 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
57 uint32_t *e2_ptr
, int selector
)
68 index
= selector
& ~7;
69 if ((index
+ 7) > dt
->limit
) {
72 ptr
= dt
->base
+ index
;
73 *e1_ptr
= cpu_ldl_kernel(env
, ptr
);
74 *e2_ptr
= cpu_ldl_kernel(env
, ptr
+ 4);
78 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
82 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
83 if (e2
& DESC_G_MASK
) {
84 limit
= (limit
<< 12) | 0xfff;
89 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
91 return (e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000);
94 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
97 sc
->base
= get_seg_base(e1
, e2
);
98 sc
->limit
= get_seg_limit(e1
, e2
);
102 /* init the segment cache in vm86 mode. */
103 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
107 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
108 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
109 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
112 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
113 uint32_t *esp_ptr
, int dpl
)
115 X86CPU
*cpu
= x86_env_get_cpu(env
);
116 int type
, index
, shift
;
121 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
122 for (i
= 0; i
< env
->tr
.limit
; i
++) {
123 printf("%02x ", env
->tr
.base
[i
]);
132 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
133 cpu_abort(CPU(cpu
), "invalid tss");
135 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
136 if ((type
& 7) != 1) {
137 cpu_abort(CPU(cpu
), "invalid tss type");
140 index
= (dpl
* 4 + 2) << shift
;
141 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
142 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
145 *esp_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
);
146 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 2);
148 *esp_ptr
= cpu_ldl_kernel(env
, env
->tr
.base
+ index
);
149 *ss_ptr
= cpu_lduw_kernel(env
, env
->tr
.base
+ index
+ 4);
153 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
)
158 if ((selector
& 0xfffc) != 0) {
159 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
160 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
162 if (!(e2
& DESC_S_MASK
)) {
163 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
166 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
167 if (seg_reg
== R_CS
) {
168 if (!(e2
& DESC_CS_MASK
)) {
169 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
172 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
174 } else if (seg_reg
== R_SS
) {
175 /* SS must be writable data */
176 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
177 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
179 if (dpl
!= cpl
|| dpl
!= rpl
) {
180 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
183 /* not readable code */
184 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
185 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
187 /* if data or non conforming code, checks the rights */
188 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
189 if (dpl
< cpl
|| dpl
< rpl
) {
190 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
194 if (!(e2
& DESC_P_MASK
)) {
195 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
197 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
198 get_seg_base(e1
, e2
),
199 get_seg_limit(e1
, e2
),
202 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
203 raise_exception_err(env
, EXCP0A_TSS
, selector
& 0xfffc);
208 #define SWITCH_TSS_JMP 0
209 #define SWITCH_TSS_IRET 1
210 #define SWITCH_TSS_CALL 2
212 /* XXX: restore CPU state in registers (PowerPC case) */
213 static void switch_tss(CPUX86State
*env
, int tss_selector
,
214 uint32_t e1
, uint32_t e2
, int source
,
217 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
218 target_ulong tss_base
;
219 uint32_t new_regs
[8], new_segs
[6];
220 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
221 uint32_t old_eflags
, eflags_mask
;
226 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
227 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
230 /* if task gate, we read the TSS segment and we load it */
232 if (!(e2
& DESC_P_MASK
)) {
233 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
235 tss_selector
= e1
>> 16;
236 if (tss_selector
& 4) {
237 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
239 if (load_segment(env
, &e1
, &e2
, tss_selector
) != 0) {
240 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
242 if (e2
& DESC_S_MASK
) {
243 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
245 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
246 if ((type
& 7) != 1) {
247 raise_exception_err(env
, EXCP0D_GPF
, tss_selector
& 0xfffc);
251 if (!(e2
& DESC_P_MASK
)) {
252 raise_exception_err(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc);
260 tss_limit
= get_seg_limit(e1
, e2
);
261 tss_base
= get_seg_base(e1
, e2
);
262 if ((tss_selector
& 4) != 0 ||
263 tss_limit
< tss_limit_max
) {
264 raise_exception_err(env
, EXCP0A_TSS
, tss_selector
& 0xfffc);
266 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
268 old_tss_limit_max
= 103;
270 old_tss_limit_max
= 43;
273 /* read all the registers from the new TSS */
276 new_cr3
= cpu_ldl_kernel(env
, tss_base
+ 0x1c);
277 new_eip
= cpu_ldl_kernel(env
, tss_base
+ 0x20);
278 new_eflags
= cpu_ldl_kernel(env
, tss_base
+ 0x24);
279 for (i
= 0; i
< 8; i
++) {
280 new_regs
[i
] = cpu_ldl_kernel(env
, tss_base
+ (0x28 + i
* 4));
282 for (i
= 0; i
< 6; i
++) {
283 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x48 + i
* 4));
285 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x60);
286 new_trap
= cpu_ldl_kernel(env
, tss_base
+ 0x64);
290 new_eip
= cpu_lduw_kernel(env
, tss_base
+ 0x0e);
291 new_eflags
= cpu_lduw_kernel(env
, tss_base
+ 0x10);
292 for (i
= 0; i
< 8; i
++) {
293 new_regs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x12 + i
* 2)) |
296 for (i
= 0; i
< 4; i
++) {
297 new_segs
[i
] = cpu_lduw_kernel(env
, tss_base
+ (0x22 + i
* 4));
299 new_ldt
= cpu_lduw_kernel(env
, tss_base
+ 0x2a);
304 /* XXX: avoid a compiler warning, see
305 http://support.amd.com/us/Processor_TechDocs/24593.pdf
306 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
309 /* NOTE: we must avoid memory exceptions during the task switch,
310 so we make dummy accesses before */
311 /* XXX: it can still fail in some cases, so a bigger hack is
312 necessary to valid the TLB after having done the accesses */
314 v1
= cpu_ldub_kernel(env
, env
->tr
.base
);
315 v2
= cpu_ldub_kernel(env
, env
->tr
.base
+ old_tss_limit_max
);
316 cpu_stb_kernel(env
, env
->tr
.base
, v1
);
317 cpu_stb_kernel(env
, env
->tr
.base
+ old_tss_limit_max
, v2
);
319 /* clear busy bit (it is restartable) */
320 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
324 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
325 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
326 e2
&= ~DESC_TSS_BUSY_MASK
;
327 cpu_stl_kernel(env
, ptr
+ 4, e2
);
329 old_eflags
= cpu_compute_eflags(env
);
330 if (source
== SWITCH_TSS_IRET
) {
331 old_eflags
&= ~NT_MASK
;
334 /* save the current state in the old TSS */
337 cpu_stl_kernel(env
, env
->tr
.base
+ 0x20, next_eip
);
338 cpu_stl_kernel(env
, env
->tr
.base
+ 0x24, old_eflags
);
339 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
]);
340 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
]);
341 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
]);
342 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
]);
343 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
]);
344 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
]);
345 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
]);
346 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
]);
347 for (i
= 0; i
< 6; i
++) {
348 cpu_stw_kernel(env
, env
->tr
.base
+ (0x48 + i
* 4),
349 env
->segs
[i
].selector
);
353 cpu_stw_kernel(env
, env
->tr
.base
+ 0x0e, next_eip
);
354 cpu_stw_kernel(env
, env
->tr
.base
+ 0x10, old_eflags
);
355 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
]);
356 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
]);
357 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
]);
358 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
]);
359 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
]);
360 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
]);
361 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
]);
362 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
]);
363 for (i
= 0; i
< 4; i
++) {
364 cpu_stw_kernel(env
, env
->tr
.base
+ (0x22 + i
* 4),
365 env
->segs
[i
].selector
);
369 /* now if an exception occurs, it will occurs in the next task
372 if (source
== SWITCH_TSS_CALL
) {
373 cpu_stw_kernel(env
, tss_base
, env
->tr
.selector
);
374 new_eflags
|= NT_MASK
;
378 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
382 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
383 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
384 e2
|= DESC_TSS_BUSY_MASK
;
385 cpu_stl_kernel(env
, ptr
+ 4, e2
);
388 /* set the new CPU state */
389 /* from this point, any exception which occurs can give problems */
390 env
->cr
[0] |= CR0_TS_MASK
;
391 env
->hflags
|= HF_TS_MASK
;
392 env
->tr
.selector
= tss_selector
;
393 env
->tr
.base
= tss_base
;
394 env
->tr
.limit
= tss_limit
;
395 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
397 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
398 cpu_x86_update_cr3(env
, new_cr3
);
401 /* load all registers without an exception, then reload them with
402 possible exception */
404 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
405 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
407 eflags_mask
&= 0xffff;
409 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
410 /* XXX: what to do in 16 bit case? */
411 env
->regs
[R_EAX
] = new_regs
[0];
412 env
->regs
[R_ECX
] = new_regs
[1];
413 env
->regs
[R_EDX
] = new_regs
[2];
414 env
->regs
[R_EBX
] = new_regs
[3];
415 env
->regs
[R_ESP
] = new_regs
[4];
416 env
->regs
[R_EBP
] = new_regs
[5];
417 env
->regs
[R_ESI
] = new_regs
[6];
418 env
->regs
[R_EDI
] = new_regs
[7];
419 if (new_eflags
& VM_MASK
) {
420 for (i
= 0; i
< 6; i
++) {
421 load_seg_vm(env
, i
, new_segs
[i
]);
424 /* first just selectors as the rest may trigger exceptions */
425 for (i
= 0; i
< 6; i
++) {
426 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
430 env
->ldt
.selector
= new_ldt
& ~4;
437 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
440 if ((new_ldt
& 0xfffc) != 0) {
442 index
= new_ldt
& ~7;
443 if ((index
+ 7) > dt
->limit
) {
444 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
446 ptr
= dt
->base
+ index
;
447 e1
= cpu_ldl_kernel(env
, ptr
);
448 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
449 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
450 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
452 if (!(e2
& DESC_P_MASK
)) {
453 raise_exception_err(env
, EXCP0A_TSS
, new_ldt
& 0xfffc);
455 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
458 /* load the segments */
459 if (!(new_eflags
& VM_MASK
)) {
460 int cpl
= new_segs
[R_CS
] & 3;
461 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
);
462 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
);
463 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
);
464 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
);
465 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
);
466 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
);
469 /* check that env->eip is in the CS segment limits */
470 if (new_eip
> env
->segs
[R_CS
].limit
) {
471 /* XXX: different exception if CALL? */
472 raise_exception_err(env
, EXCP0D_GPF
, 0);
475 #ifndef CONFIG_USER_ONLY
476 /* reset local breakpoints */
477 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
478 for (i
= 0; i
< DR7_MAX_BP
; i
++) {
479 if (hw_local_breakpoint_enabled(env
->dr
[7], i
) &&
480 !hw_global_breakpoint_enabled(env
->dr
[7], i
)) {
481 hw_breakpoint_remove(env
, i
);
484 env
->dr
[7] &= ~DR7_LOCAL_BP_MASK
;
489 static inline unsigned int get_sp_mask(unsigned int e2
)
491 if (e2
& DESC_B_MASK
) {
498 static int exception_has_error_code(int intno
)
514 #define SET_ESP(val, sp_mask) \
516 if ((sp_mask) == 0xffff) { \
517 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
519 } else if ((sp_mask) == 0xffffffffLL) { \
520 env->regs[R_ESP] = (uint32_t)(val); \
522 env->regs[R_ESP] = (val); \
526 #define SET_ESP(val, sp_mask) \
528 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
529 ((val) & (sp_mask)); \
533 /* in 64-bit machines, this can overflow. So this segment addition macro
534 * can be used to trim the value to 32-bit whenever needed */
535 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
537 /* XXX: add a is_user flag to have proper security support */
538 #define PUSHW(ssp, sp, sp_mask, val) \
541 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
544 #define PUSHL(ssp, sp, sp_mask, val) \
547 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
550 #define POPW(ssp, sp, sp_mask, val) \
552 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
556 #define POPL(ssp, sp, sp_mask, val) \
558 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
562 /* protected mode interrupt */
563 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
564 int error_code
, unsigned int next_eip
,
568 target_ulong ptr
, ssp
;
569 int type
, dpl
, selector
, ss_dpl
, cpl
;
570 int has_error_code
, new_stack
, shift
;
571 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
572 uint32_t old_eip
, sp_mask
;
573 int vm86
= env
->eflags
& VM_MASK
;
576 if (!is_int
&& !is_hw
) {
577 has_error_code
= exception_has_error_code(intno
);
586 if (intno
* 8 + 7 > dt
->limit
) {
587 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
589 ptr
= dt
->base
+ intno
* 8;
590 e1
= cpu_ldl_kernel(env
, ptr
);
591 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
592 /* check gate type */
593 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
595 case 5: /* task gate */
596 /* must do that check here to return the correct error code */
597 if (!(e2
& DESC_P_MASK
)) {
598 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
600 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
601 if (has_error_code
) {
605 /* push the error code */
606 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
608 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
613 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
614 ssp
= env
->segs
[R_SS
].base
+ esp
;
616 cpu_stl_kernel(env
, ssp
, error_code
);
618 cpu_stw_kernel(env
, ssp
, error_code
);
623 case 6: /* 286 interrupt gate */
624 case 7: /* 286 trap gate */
625 case 14: /* 386 interrupt gate */
626 case 15: /* 386 trap gate */
629 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
632 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
633 cpl
= env
->hflags
& HF_CPL_MASK
;
634 /* check privilege if software int */
635 if (is_int
&& dpl
< cpl
) {
636 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
638 /* check valid bit */
639 if (!(e2
& DESC_P_MASK
)) {
640 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
643 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
644 if ((selector
& 0xfffc) == 0) {
645 raise_exception_err(env
, EXCP0D_GPF
, 0);
647 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
648 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
650 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
651 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
653 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
655 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
657 if (!(e2
& DESC_P_MASK
)) {
658 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
660 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
661 /* to inner privilege */
662 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
);
663 if ((ss
& 0xfffc) == 0) {
664 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
666 if ((ss
& 3) != dpl
) {
667 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
669 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
670 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
672 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
674 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
676 if (!(ss_e2
& DESC_S_MASK
) ||
677 (ss_e2
& DESC_CS_MASK
) ||
678 !(ss_e2
& DESC_W_MASK
)) {
679 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
681 if (!(ss_e2
& DESC_P_MASK
)) {
682 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
685 sp_mask
= get_sp_mask(ss_e2
);
686 ssp
= get_seg_base(ss_e1
, ss_e2
);
687 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
688 /* to same privilege */
690 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
693 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
694 ssp
= env
->segs
[R_SS
].base
;
695 esp
= env
->regs
[R_ESP
];
698 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
699 new_stack
= 0; /* avoid warning */
700 sp_mask
= 0; /* avoid warning */
701 ssp
= 0; /* avoid warning */
702 esp
= 0; /* avoid warning */
708 /* XXX: check that enough room is available */
709 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
718 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
719 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
720 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
721 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
723 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
724 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
726 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
727 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
728 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
729 if (has_error_code
) {
730 PUSHL(ssp
, esp
, sp_mask
, error_code
);
735 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
736 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
737 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
738 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
740 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
741 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
743 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
744 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
745 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
746 if (has_error_code
) {
747 PUSHW(ssp
, esp
, sp_mask
, error_code
);
751 /* interrupt gate clear IF mask */
752 if ((type
& 1) == 0) {
753 env
->eflags
&= ~IF_MASK
;
755 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
759 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
760 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
761 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
762 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
764 ss
= (ss
& ~3) | dpl
;
765 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
766 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
768 SET_ESP(esp
, sp_mask
);
770 selector
= (selector
& ~3) | dpl
;
771 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
772 get_seg_base(e1
, e2
),
773 get_seg_limit(e1
, e2
),
780 #define PUSHQ(sp, val) \
783 cpu_stq_kernel(env, sp, (val)); \
786 #define POPQ(sp, val) \
788 val = cpu_ldq_kernel(env, sp); \
792 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
794 X86CPU
*cpu
= x86_env_get_cpu(env
);
798 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
799 env
->tr
.base
, env
->tr
.limit
);
802 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
803 cpu_abort(CPU(cpu
), "invalid tss");
805 index
= 8 * level
+ 4;
806 if ((index
+ 7) > env
->tr
.limit
) {
807 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
809 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
812 /* 64 bit interrupt */
813 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
814 int error_code
, target_ulong next_eip
, int is_hw
)
818 int type
, dpl
, selector
, cpl
, ist
;
819 int has_error_code
, new_stack
;
820 uint32_t e1
, e2
, e3
, ss
;
821 target_ulong old_eip
, esp
, offset
;
824 if (!is_int
&& !is_hw
) {
825 has_error_code
= exception_has_error_code(intno
);
834 if (intno
* 16 + 15 > dt
->limit
) {
835 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
837 ptr
= dt
->base
+ intno
* 16;
838 e1
= cpu_ldl_kernel(env
, ptr
);
839 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
840 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
841 /* check gate type */
842 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
844 case 14: /* 386 interrupt gate */
845 case 15: /* 386 trap gate */
848 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
851 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
852 cpl
= env
->hflags
& HF_CPL_MASK
;
853 /* check privilege if software int */
854 if (is_int
&& dpl
< cpl
) {
855 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
857 /* check valid bit */
858 if (!(e2
& DESC_P_MASK
)) {
859 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
862 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
864 if ((selector
& 0xfffc) == 0) {
865 raise_exception_err(env
, EXCP0D_GPF
, 0);
868 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
869 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
871 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
872 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
874 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
876 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
878 if (!(e2
& DESC_P_MASK
)) {
879 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
881 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
882 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
884 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
885 /* to inner privilege */
887 esp
= get_rsp_from_tss(env
, ist
+ 3);
889 esp
= get_rsp_from_tss(env
, dpl
);
891 esp
&= ~0xfLL
; /* align stack */
894 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
895 /* to same privilege */
896 if (env
->eflags
& VM_MASK
) {
897 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
901 esp
= get_rsp_from_tss(env
, ist
+ 3);
903 esp
= env
->regs
[R_ESP
];
905 esp
&= ~0xfLL
; /* align stack */
908 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
909 new_stack
= 0; /* avoid warning */
910 esp
= 0; /* avoid warning */
913 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
914 PUSHQ(esp
, env
->regs
[R_ESP
]);
915 PUSHQ(esp
, cpu_compute_eflags(env
));
916 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
918 if (has_error_code
) {
919 PUSHQ(esp
, error_code
);
922 /* interrupt gate clear IF mask */
923 if ((type
& 1) == 0) {
924 env
->eflags
&= ~IF_MASK
;
926 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
930 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
932 env
->regs
[R_ESP
] = esp
;
934 selector
= (selector
& ~3) | dpl
;
935 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
936 get_seg_base(e1
, e2
),
937 get_seg_limit(e1
, e2
),
944 #if defined(CONFIG_USER_ONLY)
945 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
947 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
949 cs
->exception_index
= EXCP_SYSCALL
;
950 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
954 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
958 if (!(env
->efer
& MSR_EFER_SCE
)) {
959 raise_exception_err(env
, EXCP06_ILLOP
, 0);
961 selector
= (env
->star
>> 32) & 0xffff;
962 if (env
->hflags
& HF_LMA_MASK
) {
965 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
966 env
->regs
[11] = cpu_compute_eflags(env
);
968 code64
= env
->hflags
& HF_CS64_MASK
;
970 env
->eflags
&= ~env
->fmask
;
971 cpu_load_eflags(env
, env
->eflags
, 0);
972 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
974 DESC_G_MASK
| DESC_P_MASK
|
976 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
978 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
980 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
982 DESC_W_MASK
| DESC_A_MASK
);
984 env
->eip
= env
->lstar
;
986 env
->eip
= env
->cstar
;
989 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
991 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
992 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
994 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
996 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
997 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
999 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1001 DESC_W_MASK
| DESC_A_MASK
);
1002 env
->eip
= (uint32_t)env
->star
;
1008 #ifdef TARGET_X86_64
1009 void helper_sysret(CPUX86State
*env
, int dflag
)
1013 if (!(env
->efer
& MSR_EFER_SCE
)) {
1014 raise_exception_err(env
, EXCP06_ILLOP
, 0);
1016 cpl
= env
->hflags
& HF_CPL_MASK
;
1017 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1018 raise_exception_err(env
, EXCP0D_GPF
, 0);
1020 selector
= (env
->star
>> 48) & 0xffff;
1021 if (env
->hflags
& HF_LMA_MASK
) {
1022 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1023 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1026 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1028 DESC_G_MASK
| DESC_P_MASK
|
1029 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1030 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1032 env
->eip
= env
->regs
[R_ECX
];
1034 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1036 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1037 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1038 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1039 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1041 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1043 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1044 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1045 DESC_W_MASK
| DESC_A_MASK
);
1047 env
->eflags
|= IF_MASK
;
1048 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1050 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1051 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1052 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1053 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1054 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1056 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1057 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1058 DESC_W_MASK
| DESC_A_MASK
);
1063 /* real mode interrupt */
1064 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1065 int error_code
, unsigned int next_eip
)
1068 target_ulong ptr
, ssp
;
1070 uint32_t offset
, esp
;
1071 uint32_t old_cs
, old_eip
;
1073 /* real mode (simpler!) */
1075 if (intno
* 4 + 3 > dt
->limit
) {
1076 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1078 ptr
= dt
->base
+ intno
* 4;
1079 offset
= cpu_lduw_kernel(env
, ptr
);
1080 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1081 esp
= env
->regs
[R_ESP
];
1082 ssp
= env
->segs
[R_SS
].base
;
1088 old_cs
= env
->segs
[R_CS
].selector
;
1089 /* XXX: use SS segment size? */
1090 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1091 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1092 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1094 /* update processor state */
1095 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1097 env
->segs
[R_CS
].selector
= selector
;
1098 env
->segs
[R_CS
].base
= (selector
<< 4);
1099 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1102 #if defined(CONFIG_USER_ONLY)
1103 /* fake user mode interrupt */
1104 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1105 int error_code
, target_ulong next_eip
)
1109 int dpl
, cpl
, shift
;
1113 if (env
->hflags
& HF_LMA_MASK
) {
1118 ptr
= dt
->base
+ (intno
<< shift
);
1119 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1121 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1122 cpl
= env
->hflags
& HF_CPL_MASK
;
1123 /* check privilege if software int */
1124 if (is_int
&& dpl
< cpl
) {
1125 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1128 /* Since we emulate only user space, we cannot do more than
1129 exiting the emulation with the suitable exception and error
1130 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1131 if (is_int
|| intno
== EXCP_SYSCALL
) {
1132 env
->eip
= next_eip
;
1138 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1139 int error_code
, int is_hw
, int rm
)
1141 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1142 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1143 control
.event_inj
));
1145 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1149 type
= SVM_EVTINJ_TYPE_SOFT
;
1151 type
= SVM_EVTINJ_TYPE_EXEPT
;
1153 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1154 if (!rm
&& exception_has_error_code(intno
)) {
1155 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1156 stl_phys(cs
->as
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1157 control
.event_inj_err
),
1161 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1168 * Begin execution of an interruption. is_int is TRUE if coming from
1169 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1170 * instruction. It is only relevant if is_int is TRUE.
1172 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1173 int error_code
, target_ulong next_eip
, int is_hw
)
1175 CPUX86State
*env
= &cpu
->env
;
1177 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1178 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1181 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1182 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1183 count
, intno
, error_code
, is_int
,
1184 env
->hflags
& HF_CPL_MASK
,
1185 env
->segs
[R_CS
].selector
, env
->eip
,
1186 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1187 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1188 if (intno
== 0x0e) {
1189 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1191 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1194 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1201 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1202 for (i
= 0; i
< 16; i
++) {
1203 qemu_log(" %02x", ldub(ptr
+ i
));
1211 if (env
->cr
[0] & CR0_PE_MASK
) {
1212 #if !defined(CONFIG_USER_ONLY)
1213 if (env
->hflags
& HF_SVMI_MASK
) {
1214 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1217 #ifdef TARGET_X86_64
1218 if (env
->hflags
& HF_LMA_MASK
) {
1219 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1223 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1227 #if !defined(CONFIG_USER_ONLY)
1228 if (env
->hflags
& HF_SVMI_MASK
) {
1229 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1232 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1235 #if !defined(CONFIG_USER_ONLY)
1236 if (env
->hflags
& HF_SVMI_MASK
) {
1237 CPUState
*cs
= CPU(cpu
);
1238 uint32_t event_inj
= ldl_phys(cs
->as
, env
->vm_vmcb
+
1239 offsetof(struct vmcb
,
1240 control
.event_inj
));
1243 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1244 event_inj
& ~SVM_EVTINJ_VALID
);
1249 void x86_cpu_do_interrupt(CPUState
*cs
)
1251 X86CPU
*cpu
= X86_CPU(cs
);
1252 CPUX86State
*env
= &cpu
->env
;
1254 #if defined(CONFIG_USER_ONLY)
1255 /* if user mode only, we simulate a fake exception
1256 which will be handled outside the cpu execution
1258 do_interrupt_user(env
, cs
->exception_index
,
1259 env
->exception_is_int
,
1261 env
->exception_next_eip
);
1262 /* successfully delivered */
1263 env
->old_exception
= -1;
1265 /* simulate a real cpu exception. On i386, it can
1266 trigger new exceptions, but we do not handle
1267 double or triple faults yet. */
1268 do_interrupt_all(cpu
, cs
->exception_index
,
1269 env
->exception_is_int
,
1271 env
->exception_next_eip
, 0);
1272 /* successfully delivered */
1273 env
->old_exception
= -1;
1277 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1279 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1282 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1284 X86CPU
*cpu
= X86_CPU(cs
);
1285 CPUX86State
*env
= &cpu
->env
;
1288 #if !defined(CONFIG_USER_ONLY)
1289 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1290 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1291 apic_poll_irq(cpu
->apic_state
);
1294 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1296 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1297 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1298 !(env
->hflags
& HF_SMM_MASK
)) {
1299 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1300 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1303 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1304 !(env
->hflags2
& HF2_NMI_MASK
)) {
1305 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1306 env
->hflags2
|= HF2_NMI_MASK
;
1307 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1309 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1310 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1311 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1313 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1314 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1315 (env
->hflags2
& HF2_HIF_MASK
)) ||
1316 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1317 (env
->eflags
& IF_MASK
&&
1318 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1320 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1321 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1322 CPU_INTERRUPT_VIRQ
);
1323 intno
= cpu_get_pic_interrupt(env
);
1324 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1325 "Servicing hardware INT=0x%02x\n", intno
);
1326 do_interrupt_x86_hardirq(env
, intno
, 1);
1327 /* ensure that no TB jump will be modified as
1328 the program flow was changed */
1330 #if !defined(CONFIG_USER_ONLY)
1331 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1332 (env
->eflags
& IF_MASK
) &&
1333 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1335 /* FIXME: this should respect TPR */
1336 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1337 intno
= ldl_phys(cs
->as
, env
->vm_vmcb
1338 + offsetof(struct vmcb
, control
.int_vector
));
1339 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1340 "Servicing virtual hardware INT=0x%02x\n", intno
);
1341 do_interrupt_x86_hardirq(env
, intno
, 1);
1342 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1351 void helper_enter_level(CPUX86State
*env
, int level
, int data32
,
1355 uint32_t esp_mask
, esp
, ebp
;
1357 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1358 ssp
= env
->segs
[R_SS
].base
;
1359 ebp
= env
->regs
[R_EBP
];
1360 esp
= env
->regs
[R_ESP
];
1367 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
),
1368 cpu_ldl_data(env
, ssp
+ (ebp
& esp_mask
)));
1371 cpu_stl_data(env
, ssp
+ (esp
& esp_mask
), t1
);
1378 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
),
1379 cpu_lduw_data(env
, ssp
+ (ebp
& esp_mask
)));
1382 cpu_stw_data(env
, ssp
+ (esp
& esp_mask
), t1
);
1386 #ifdef TARGET_X86_64
1387 void helper_enter64_level(CPUX86State
*env
, int level
, int data64
,
1390 target_ulong esp
, ebp
;
1392 ebp
= env
->regs
[R_EBP
];
1393 esp
= env
->regs
[R_ESP
];
1401 cpu_stq_data(env
, esp
, cpu_ldq_data(env
, ebp
));
1404 cpu_stq_data(env
, esp
, t1
);
1411 cpu_stw_data(env
, esp
, cpu_lduw_data(env
, ebp
));
1414 cpu_stw_data(env
, esp
, t1
);
1419 void helper_lldt(CPUX86State
*env
, int selector
)
1423 int index
, entry_limit
;
1427 if ((selector
& 0xfffc) == 0) {
1428 /* XXX: NULL selector case: invalid LDT */
1432 if (selector
& 0x4) {
1433 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1436 index
= selector
& ~7;
1437 #ifdef TARGET_X86_64
1438 if (env
->hflags
& HF_LMA_MASK
) {
1445 if ((index
+ entry_limit
) > dt
->limit
) {
1446 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1448 ptr
= dt
->base
+ index
;
1449 e1
= cpu_ldl_kernel(env
, ptr
);
1450 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1451 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1452 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1454 if (!(e2
& DESC_P_MASK
)) {
1455 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1457 #ifdef TARGET_X86_64
1458 if (env
->hflags
& HF_LMA_MASK
) {
1461 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1462 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1463 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1467 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1470 env
->ldt
.selector
= selector
;
1473 void helper_ltr(CPUX86State
*env
, int selector
)
1477 int index
, type
, entry_limit
;
1481 if ((selector
& 0xfffc) == 0) {
1482 /* NULL selector case: invalid TR */
1487 if (selector
& 0x4) {
1488 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1491 index
= selector
& ~7;
1492 #ifdef TARGET_X86_64
1493 if (env
->hflags
& HF_LMA_MASK
) {
1500 if ((index
+ entry_limit
) > dt
->limit
) {
1501 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1503 ptr
= dt
->base
+ index
;
1504 e1
= cpu_ldl_kernel(env
, ptr
);
1505 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1506 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1507 if ((e2
& DESC_S_MASK
) ||
1508 (type
!= 1 && type
!= 9)) {
1509 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1511 if (!(e2
& DESC_P_MASK
)) {
1512 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1514 #ifdef TARGET_X86_64
1515 if (env
->hflags
& HF_LMA_MASK
) {
1518 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
1519 e4
= cpu_ldl_kernel(env
, ptr
+ 12);
1520 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1521 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1523 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1524 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1528 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1530 e2
|= DESC_TSS_BUSY_MASK
;
1531 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1533 env
->tr
.selector
= selector
;
1536 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1537 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1546 cpl
= env
->hflags
& HF_CPL_MASK
;
1547 if ((selector
& 0xfffc) == 0) {
1548 /* null selector case */
1550 #ifdef TARGET_X86_64
1551 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1554 raise_exception_err(env
, EXCP0D_GPF
, 0);
1556 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1559 if (selector
& 0x4) {
1564 index
= selector
& ~7;
1565 if ((index
+ 7) > dt
->limit
) {
1566 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1568 ptr
= dt
->base
+ index
;
1569 e1
= cpu_ldl_kernel(env
, ptr
);
1570 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1572 if (!(e2
& DESC_S_MASK
)) {
1573 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1576 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1577 if (seg_reg
== R_SS
) {
1578 /* must be writable segment */
1579 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1580 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1582 if (rpl
!= cpl
|| dpl
!= cpl
) {
1583 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1586 /* must be readable segment */
1587 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1588 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1591 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1592 /* if not conforming code, test rights */
1593 if (dpl
< cpl
|| dpl
< rpl
) {
1594 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1599 if (!(e2
& DESC_P_MASK
)) {
1600 if (seg_reg
== R_SS
) {
1601 raise_exception_err(env
, EXCP0C_STACK
, selector
& 0xfffc);
1603 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1607 /* set the access bit if not already set */
1608 if (!(e2
& DESC_A_MASK
)) {
1610 cpu_stl_kernel(env
, ptr
+ 4, e2
);
1613 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1614 get_seg_base(e1
, e2
),
1615 get_seg_limit(e1
, e2
),
1618 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1619 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1624 /* protected mode jump */
1625 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1626 int next_eip_addend
)
1629 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1630 target_ulong next_eip
;
1632 if ((new_cs
& 0xfffc) == 0) {
1633 raise_exception_err(env
, EXCP0D_GPF
, 0);
1635 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1636 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1638 cpl
= env
->hflags
& HF_CPL_MASK
;
1639 if (e2
& DESC_S_MASK
) {
1640 if (!(e2
& DESC_CS_MASK
)) {
1641 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1643 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1644 if (e2
& DESC_C_MASK
) {
1645 /* conforming code segment */
1647 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1650 /* non conforming code segment */
1653 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1656 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1659 if (!(e2
& DESC_P_MASK
)) {
1660 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1662 limit
= get_seg_limit(e1
, e2
);
1663 if (new_eip
> limit
&&
1664 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1665 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1667 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1668 get_seg_base(e1
, e2
), limit
, e2
);
1671 /* jump to call or task gate */
1672 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1674 cpl
= env
->hflags
& HF_CPL_MASK
;
1675 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1677 case 1: /* 286 TSS */
1678 case 9: /* 386 TSS */
1679 case 5: /* task gate */
1680 if (dpl
< cpl
|| dpl
< rpl
) {
1681 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1683 next_eip
= env
->eip
+ next_eip_addend
;
1684 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
1686 case 4: /* 286 call gate */
1687 case 12: /* 386 call gate */
1688 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1689 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1691 if (!(e2
& DESC_P_MASK
)) {
1692 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1695 new_eip
= (e1
& 0xffff);
1697 new_eip
|= (e2
& 0xffff0000);
1699 if (load_segment(env
, &e1
, &e2
, gate_cs
) != 0) {
1700 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1702 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1703 /* must be code segment */
1704 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1705 (DESC_S_MASK
| DESC_CS_MASK
))) {
1706 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1708 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1709 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1710 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1712 if (!(e2
& DESC_P_MASK
)) {
1713 raise_exception_err(env
, EXCP0D_GPF
, gate_cs
& 0xfffc);
1715 limit
= get_seg_limit(e1
, e2
);
1716 if (new_eip
> limit
) {
1717 raise_exception_err(env
, EXCP0D_GPF
, 0);
1719 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1720 get_seg_base(e1
, e2
), limit
, e2
);
1724 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1730 /* real mode call */
1731 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1732 int shift
, int next_eip
)
1735 uint32_t esp
, esp_mask
;
1739 esp
= env
->regs
[R_ESP
];
1740 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1741 ssp
= env
->segs
[R_SS
].base
;
1743 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1744 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
1746 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
1747 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
1750 SET_ESP(esp
, esp_mask
);
1752 env
->segs
[R_CS
].selector
= new_cs
;
1753 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1756 /* protected mode call */
1757 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1758 int shift
, int next_eip_addend
)
1761 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1762 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1763 uint32_t val
, limit
, old_sp_mask
;
1764 target_ulong ssp
, old_ssp
, next_eip
;
1766 next_eip
= env
->eip
+ next_eip_addend
;
1767 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1768 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1769 if ((new_cs
& 0xfffc) == 0) {
1770 raise_exception_err(env
, EXCP0D_GPF
, 0);
1772 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
1773 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1775 cpl
= env
->hflags
& HF_CPL_MASK
;
1776 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1777 if (e2
& DESC_S_MASK
) {
1778 if (!(e2
& DESC_CS_MASK
)) {
1779 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1781 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1782 if (e2
& DESC_C_MASK
) {
1783 /* conforming code segment */
1785 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1788 /* non conforming code segment */
1791 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1794 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1797 if (!(e2
& DESC_P_MASK
)) {
1798 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1801 #ifdef TARGET_X86_64
1802 /* XXX: check 16/32 bit cases in long mode */
1807 rsp
= env
->regs
[R_ESP
];
1808 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
1809 PUSHQ(rsp
, next_eip
);
1810 /* from this point, not restartable */
1811 env
->regs
[R_ESP
] = rsp
;
1812 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1813 get_seg_base(e1
, e2
),
1814 get_seg_limit(e1
, e2
), e2
);
1819 sp
= env
->regs
[R_ESP
];
1820 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1821 ssp
= env
->segs
[R_SS
].base
;
1823 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1824 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1826 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1827 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1830 limit
= get_seg_limit(e1
, e2
);
1831 if (new_eip
> limit
) {
1832 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1834 /* from this point, not restartable */
1835 SET_ESP(sp
, sp_mask
);
1836 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1837 get_seg_base(e1
, e2
), limit
, e2
);
1841 /* check gate type */
1842 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1843 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1846 case 1: /* available 286 TSS */
1847 case 9: /* available 386 TSS */
1848 case 5: /* task gate */
1849 if (dpl
< cpl
|| dpl
< rpl
) {
1850 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1852 switch_tss(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
1854 case 4: /* 286 call gate */
1855 case 12: /* 386 call gate */
1858 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1863 if (dpl
< cpl
|| dpl
< rpl
) {
1864 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
1866 /* check valid bit */
1867 if (!(e2
& DESC_P_MASK
)) {
1868 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
1870 selector
= e1
>> 16;
1871 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1872 param_count
= e2
& 0x1f;
1873 if ((selector
& 0xfffc) == 0) {
1874 raise_exception_err(env
, EXCP0D_GPF
, 0);
1877 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
1878 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1880 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1881 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1883 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1885 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1887 if (!(e2
& DESC_P_MASK
)) {
1888 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
1891 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1892 /* to inner privilege */
1893 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
);
1894 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1895 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1897 if ((ss
& 0xfffc) == 0) {
1898 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1900 if ((ss
& 3) != dpl
) {
1901 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1903 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
1904 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1906 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1907 if (ss_dpl
!= dpl
) {
1908 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1910 if (!(ss_e2
& DESC_S_MASK
) ||
1911 (ss_e2
& DESC_CS_MASK
) ||
1912 !(ss_e2
& DESC_W_MASK
)) {
1913 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1915 if (!(ss_e2
& DESC_P_MASK
)) {
1916 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
1919 /* push_size = ((param_count * 2) + 8) << shift; */
1921 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1922 old_ssp
= env
->segs
[R_SS
].base
;
1924 sp_mask
= get_sp_mask(ss_e2
);
1925 ssp
= get_seg_base(ss_e1
, ss_e2
);
1927 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1928 PUSHL(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1929 for (i
= param_count
- 1; i
>= 0; i
--) {
1930 val
= cpu_ldl_kernel(env
, old_ssp
+
1931 ((env
->regs
[R_ESP
] + i
* 4) &
1933 PUSHL(ssp
, sp
, sp_mask
, val
);
1936 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
1937 PUSHW(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
]);
1938 for (i
= param_count
- 1; i
>= 0; i
--) {
1939 val
= cpu_lduw_kernel(env
, old_ssp
+
1940 ((env
->regs
[R_ESP
] + i
* 2) &
1942 PUSHW(ssp
, sp
, sp_mask
, val
);
1947 /* to same privilege */
1948 sp
= env
->regs
[R_ESP
];
1949 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1950 ssp
= env
->segs
[R_SS
].base
;
1951 /* push_size = (4 << shift); */
1956 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1957 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
1959 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
1960 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
1963 /* from this point, not restartable */
1966 ss
= (ss
& ~3) | dpl
;
1967 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1969 get_seg_limit(ss_e1
, ss_e2
),
1973 selector
= (selector
& ~3) | dpl
;
1974 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1975 get_seg_base(e1
, e2
),
1976 get_seg_limit(e1
, e2
),
1978 SET_ESP(sp
, sp_mask
);
1983 /* real and vm86 mode iret */
1984 void helper_iret_real(CPUX86State
*env
, int shift
)
1986 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
1990 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1991 sp
= env
->regs
[R_ESP
];
1992 ssp
= env
->segs
[R_SS
].base
;
1995 POPL(ssp
, sp
, sp_mask
, new_eip
);
1996 POPL(ssp
, sp
, sp_mask
, new_cs
);
1998 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2001 POPW(ssp
, sp
, sp_mask
, new_eip
);
2002 POPW(ssp
, sp
, sp_mask
, new_cs
);
2003 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2005 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
2006 env
->segs
[R_CS
].selector
= new_cs
;
2007 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2009 if (env
->eflags
& VM_MASK
) {
2010 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
2013 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
2017 eflags_mask
&= 0xffff;
2019 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2020 env
->hflags2
&= ~HF2_NMI_MASK
;
2023 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
2028 /* XXX: on x86_64, we do not want to nullify FS and GS because
2029 they may still contain a valid base. I would be interested to
2030 know how a real x86_64 CPU behaves */
2031 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2032 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2036 e2
= env
->segs
[seg_reg
].flags
;
2037 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2038 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2039 /* data or non conforming code segment */
2041 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2046 /* protected mode iret */
2047 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2048 int is_iret
, int addend
)
2050 uint32_t new_cs
, new_eflags
, new_ss
;
2051 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2052 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2053 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2054 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2056 #ifdef TARGET_X86_64
2062 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2064 sp
= env
->regs
[R_ESP
];
2065 ssp
= env
->segs
[R_SS
].base
;
2066 new_eflags
= 0; /* avoid warning */
2067 #ifdef TARGET_X86_64
2073 POPQ(sp
, new_eflags
);
2080 POPL(ssp
, sp
, sp_mask
, new_eip
);
2081 POPL(ssp
, sp
, sp_mask
, new_cs
);
2084 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2085 if (new_eflags
& VM_MASK
) {
2086 goto return_to_vm86
;
2091 POPW(ssp
, sp
, sp_mask
, new_eip
);
2092 POPW(ssp
, sp
, sp_mask
, new_cs
);
2094 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2098 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2099 new_cs
, new_eip
, shift
, addend
);
2100 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2101 if ((new_cs
& 0xfffc) == 0) {
2102 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2104 if (load_segment(env
, &e1
, &e2
, new_cs
) != 0) {
2105 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2107 if (!(e2
& DESC_S_MASK
) ||
2108 !(e2
& DESC_CS_MASK
)) {
2109 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2111 cpl
= env
->hflags
& HF_CPL_MASK
;
2114 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2116 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2117 if (e2
& DESC_C_MASK
) {
2119 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2123 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2126 if (!(e2
& DESC_P_MASK
)) {
2127 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
2131 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2132 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2133 /* return to same privilege level */
2134 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2135 get_seg_base(e1
, e2
),
2136 get_seg_limit(e1
, e2
),
2139 /* return to different privilege level */
2140 #ifdef TARGET_X86_64
2150 POPL(ssp
, sp
, sp_mask
, new_esp
);
2151 POPL(ssp
, sp
, sp_mask
, new_ss
);
2155 POPW(ssp
, sp
, sp_mask
, new_esp
);
2156 POPW(ssp
, sp
, sp_mask
, new_ss
);
2159 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2161 if ((new_ss
& 0xfffc) == 0) {
2162 #ifdef TARGET_X86_64
2163 /* NULL ss is allowed in long mode if cpl != 3 */
2164 /* XXX: test CS64? */
2165 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2166 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2168 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2169 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2170 DESC_W_MASK
| DESC_A_MASK
);
2171 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2175 raise_exception_err(env
, EXCP0D_GPF
, 0);
2178 if ((new_ss
& 3) != rpl
) {
2179 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2181 if (load_segment(env
, &ss_e1
, &ss_e2
, new_ss
) != 0) {
2182 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2184 if (!(ss_e2
& DESC_S_MASK
) ||
2185 (ss_e2
& DESC_CS_MASK
) ||
2186 !(ss_e2
& DESC_W_MASK
)) {
2187 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2189 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2191 raise_exception_err(env
, EXCP0D_GPF
, new_ss
& 0xfffc);
2193 if (!(ss_e2
& DESC_P_MASK
)) {
2194 raise_exception_err(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc);
2196 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2197 get_seg_base(ss_e1
, ss_e2
),
2198 get_seg_limit(ss_e1
, ss_e2
),
2202 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2203 get_seg_base(e1
, e2
),
2204 get_seg_limit(e1
, e2
),
2207 #ifdef TARGET_X86_64
2208 if (env
->hflags
& HF_CS64_MASK
) {
2213 sp_mask
= get_sp_mask(ss_e2
);
2216 /* validate data segments */
2217 validate_seg(env
, R_ES
, rpl
);
2218 validate_seg(env
, R_DS
, rpl
);
2219 validate_seg(env
, R_FS
, rpl
);
2220 validate_seg(env
, R_GS
, rpl
);
2224 SET_ESP(sp
, sp_mask
);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;
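
    /* IRET to virtual-8086 mode: the 32-bit frame additionally holds
       ESP, SS, ES, DS, FS and GS, which are reloaded as real-mode style
       segments (base = selector << 4). */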
 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
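
/* IRET in protected mode: if the NT flag is set, this is a task return
   through the back link selector stored at offset 0 of the current TSS;
   otherwise it is a normal stack-based return.  In either case NMI
   blocking is cleared afterwards. */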
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
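
/* SYSENTER: fast system call entry to CPL 0.  CS and SS are loaded as
   flat segments derived from the IA32_SYSENTER_CS MSR (SS is CS + 8),
   EIP/ESP come from the IA32_SYSENTER_EIP/ESP MSRs, and VM, IF and RF
   are cleared. */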
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
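
/* SYSEXIT: fast return to CPL 3.  The target CS/SS selectors are taken
   at fixed offsets from IA32_SYSENTER_CS (+16/+24 for legacy mode,
   +32/+40 for 64-bit mode when dflag == 2) with RPL forced to 3; ECX
   supplies the new ESP and EDX the new EIP. */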
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
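
/* LSL: load the segment limit of the descriptor referenced by the
   selector.  ZF (communicated through CC_SRC here) is set when the
   descriptor type and privilege checks pass, cleared otherwise. */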
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
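
/* LAR: load the access rights of the descriptor (the attribute bytes,
   masked with 0x00f0ff00).  As with LSL, success is reported by setting
   ZF through CC_SRC. */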
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
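
/* VERR/VERW: set ZF if the segment described by the selector is
   readable (respectively writable) at the current CPL and the
   selector's RPL, without faulting. */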
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
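
/* User-mode emulation only: segment loads requested by the user-mode
   front end.  In real or vm86 mode the segment cache is set directly
   (base = selector << 4); in protected mode the full descriptor checks
   in helper_load_seg() apply. */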
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
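
/* The I/O permission bitmap lives in the 32-bit TSS after the offset
   stored at byte 0x66; each port number maps to one bit.  An access of
   'size' bytes starting at port 'addr' is only allowed if every
   corresponding bit is clear, hence the two-byte read below to cover
   accesses that straddle a byte boundary. */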
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}
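
/* Note: architecturally the bitmap check is required when CPL > IOPL in
   protected mode and always in virtual-8086 mode; the translated code
   is expected to call helper_check_iob/iow/iol before IN/OUT style
   instructions only in those cases. */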