/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel

#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
/* return non-zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
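/* The source of a task switch changes its side effects: JMP and IRET
   clear the busy bit of the outgoing TSS, JMP and CALL set the busy
   bit of the incoming one, and only CALL stores a back link in the new
   TSS and sets NT, so that a later IRET can return to the old task. */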
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
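    /* 32-bit TSS layout used below: +0x1c CR3, +0x20 EIP, +0x24 EFLAGS,
       +0x28 EAX..EDI (eight 32-bit slots), +0x48 ES..GS selectors,
       +0x60 LDT selector, +0x64 trap flag and I/O map base. The 16-bit
       (286) TSS packs the same state at the smaller offsets used in the
       else branch. */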
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
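/* Note that these macros only update the caller's local stack pointer
   copy; callers commit it with SET_ESP() once the whole frame has been
   written, so a push that faults leaves the architectural ESP intact
   and the instruction restartable. */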
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
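/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
   IST1..IST7 follow at offset 36, so index = 8 * level + 4 covers both
   the RSP entries (level 0..2) and, via 'ist + 3' in the callers below,
   the IST entries. */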
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e1 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
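/* The STAR MSR packs the flat SYSCALL/SYSRET selectors: bits 47:32
   give the SYSCALL CS (with SS implied at +8), bits 63:48 the SYSRET
   base selector used by helper_sysret() below. LSTAR/CSTAR hold the
   64-bit and compatibility-mode entry points and FMASK the EFLAGS bits
   cleared on entry. */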
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}
#endif
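/* Under SVM, a pending event is described to the guest through the
   VMCB event_inj field: vector in bits 7:0, type in bits 10:8, a valid
   bit, and an optional error code in event_inj_err; handle_even_inj()
   below fills these in before the interrupt is delivered. */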
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx,
                         env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
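/* helper_enter_level() implements the "display" copy of a nested
   ENTER: for level > 0 it re-pushes level - 1 saved frame pointers
   from the old frame and finally pushes the new frame pointer t1. */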
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
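/* In long mode, system-segment descriptors (LDT and TSS) grow to 16
   bytes: the third word holds base[63:32] and the fourth is reserved
   with a type field that must be zero. That is why helper_lldt() and
   helper_ltr() above use an entry_limit of 15 and read the extra
   e3/e4 words. */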
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
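/* On a return to an outer privilege level, data segment registers
   whose cached descriptor DPL is below the new CPL must be nulled so
   that the less privileged code cannot keep using them; validate_seg()
   below implements that check. */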
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;

        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
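/* The 32-bit TSS may end with an I/O permission bitmap: the 16-bit
   word at offset 0x66 gives the bitmap's offset within the TSS and
   each bit covers one port (1 = access denied). check_io() below loads
   the two bytes an access can span and requires all covered bits to
   be zero. */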
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}