/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

//#define DEBUG_PCALL

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
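
/* Editorial note on naming used throughout this file: a segment
   descriptor is handled as two 32-bit words, where e1 is the low word
   (limit 15..0, base 15..0) and e2 is the high word (base 31..24,
   flags, limit 19..16, type/DPL/present bits, base 23..16). */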

/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
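
/* The descriptor limit field is 20 bits wide; when the granularity bit
   (DESC_G_MASK) is set it counts 4K pages, hence the shift by 12 with
   the low 12 bits forced to 1 in the helper below. */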

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
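
/* A task state segment stores one SS:ESP pair for each of privilege
   levels 0-2; the pair for 'dpl' sits at offset (dpl * 4 + 2) << shift,
   where shift is 0 for a 16-bit TSS and 1 for a 32-bit TSS. */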

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
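
/* 32-bit TSS layout as used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
   0x24, the eight general registers at 0x28, the six segment selectors
   at 0x48, the LDT selector at 0x60 and the trap word at 0x64. The
   16-bit TSS packs the same data in 2-byte slots starting at 0x0e. */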

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
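
/* The stack-size (B) bit of the SS descriptor selects a 32-bit or a
   16-bit stack pointer, so all stack arithmetic below is performed
   under the mask returned here. */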

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
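
/* A protected mode interrupt builds, from top of frame down: [old SS
   and old ESP when switching stacks,] EFLAGS, CS, EIP and, for some
   exceptions, an error code; in vm86 mode the four data segment
   selectors are pushed first. */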

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
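
/* In long mode each IDT entry is 16 bytes (the third word e3 holds the
   upper 32 bits of the handler address) and bits 0-2 of e2 select one
   of the Interrupt Stack Table entries read from the 64-bit TSS by
   get_rsp_from_tss() above. */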

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif /* TARGET_X86_64 */

#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
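
/* SYSCALL/SYSRET derive their selectors from MSR_STAR: bits 47-32 give
   the kernel CS used above (with SS = CS + 8), bits 63-48 the user CS
   base used by SYSRET below. */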

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx,
                         env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
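
/* ENTER: the helpers below copy 'level - 1' frame pointers from the
   old frame and then push the new frame pointer t1. */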

void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
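
/* Far jumps either target a code segment directly or go through a call
   gate, task gate or TSS descriptor; the gate cases re-dispatch on the
   descriptor type below. */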

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
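
/* A far call through a call gate to an inner privilege level switches
   to the stack given by the TSS and copies 'param_count' words (bits
   0-4 of the gate's e2 word) from the old stack to the new one. */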

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
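
/* An IRET executed with the NT flag set returns to the previous task
   through the back link stored in the first word of the current TSS. */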

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
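
/* SYSENTER/SYSEXIT use a flat segment model derived from the
   SYSENTER_CS MSR: CS = sysenter_cs and SS = sysenter_cs + 8 on entry;
   exit uses +16/+24 (32-bit) or +32/+40 (64-bit) with RPL 3. */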

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif