/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
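/*
 * Note: e1/e2 are the two 32-bit words of an 8-byte descriptor: e1 holds
 * limit bits 15..0 and base bits 15..0, e2 holds the remaining base bits
 * plus the type and flag bits that get_seg_limit()/get_seg_base() below
 * decode.
 */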
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;

        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
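/*
 * Note: the inner-level stack pointers live at the start of the TSS: a
 * 16-bit TSS packs SP/SS word pairs starting at offset 2 and a 32-bit TSS
 * packs ESP/SS dword pairs starting at offset 4, which is what the
 * "(dpl * 4 + 2) << shift" index above computes.
 */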
/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
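/*
 * switch_tss() is reached from three places: a far JMP to a TSS or task
 * gate, an IRET with EFLAGS.NT set, and a CALL or interrupt through a task
 * gate.  The source decides how the busy bits of the old/new TSS and the
 * back link (previous task selector) are updated below.
 */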
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
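/*
 * DESC_B_MASK is the stack segment's "big" flag: a 32-bit stack uses the
 * full ESP, a 16-bit one only SP, hence the two masks.
 */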
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
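/* The intno values above are #DF, #TS, #NP, #SS, #GP, #PF and #AC, the
   only x86 exceptions that push an error code. */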
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
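/*
 * Example: with ssp = 0xffff0000 and sp = 0x00010004 the plain sum is
 * 0x100000004; SEG_ADDL truncates it to 0x00000004, matching the 32-bit
 * address wraparound of a real CPU.
 */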
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;
#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
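/*
 * In the 64-bit TSS, RSP0..RSP2 sit at offsets 4, 12 and 20 and IST1..IST7
 * follow at offset 36, so "8 * level + 4" covers both cases: levels 0..2
 * select RSPn and levels 4..10 (ist + 3 in the caller below) select ISTn.
 */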
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
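/*
 * Note: on the long mode path above, the SFMASK MSR (env->fmask) selects
 * which RFLAGS bits SYSCALL clears; the legacy path clears IF/RF/VM
 * unconditionally.
 */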
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
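/*
 * SYSCALL/SYSRET take all selectors from the STAR MSR: bits 47:32 give the
 * kernel CS (with SS = CS + 8) and bits 63:48 the SYSRET user base
 * selector (32-bit CS = base, SS = base + 8, 64-bit CS = base + 16).
 */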
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}

#else
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
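/*
 * Note: in long mode, LDT and TSS descriptors are 16 bytes so they can
 * carry a 64-bit base; hence entry_limit is 15 instead of 7 above and the
 * high dword (e3) is merged into the cached base.
 */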
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
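/*
 * SYSENTER takes everything from MSRs: CS from IA32_SYSENTER_CS (with
 * SS = CS + 8) and ESP/EIP from IA32_SYSENTER_ESP/EIP.  Nothing is saved,
 * so SYSEXIT below expects user space to pass the return ESP/EIP in
 * ECX/EDX.
 */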
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
);