/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "dyngen-exec.h"
#include "qemu-log.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env)                                   \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
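
/*
 * Worked example (illustrative values, not from the original source):
 * for the common flat 4 GiB code descriptor e1 = 0x0000ffff,
 * e2 = 0x00cf9a00, get_seg_base() yields 0 and get_seg_limit() first
 * extracts the 20-bit limit 0xfffff; since DESC_G_MASK is set, the
 * 4 KiB granularity expansion (limit << 12) | 0xfff gives 0xffffffff.
 */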
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
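
/*
 * Layout note (added for clarity): in a 32-bit TSS (shift == 1),
 * index = (dpl * 4 + 2) << 1, so for dpl == 0 the code reads ESP0 at
 * tr.base + 4 and SS0 at tr.base + 8; in a 16-bit TSS (shift == 0) the
 * same dpl reads SP0 at tr.base + 2 and SS0 at tr.base + 4.
 */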
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
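
/* Note (added): the source argument of switch_tss() records what
   triggered the task switch. SWITCH_TSS_JMP (ljmp to a TSS or task
   gate) and SWITCH_TSS_IRET (iret with NT set) clear the busy bit of
   the outgoing TSS, while SWITCH_TSS_CALL (lcall/int through a task
   gate) keeps it busy and links the new task back to the old one via
   the back-link field and NT. */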
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        }
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
493 static inline void check_io(int addr
, int size
)
495 int io_offset
, val
, mask
;
497 /* TSS must be a valid 32 bit one */
498 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
499 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
500 env
->tr
.limit
< 103) {
503 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
504 io_offset
+= (addr
>> 3);
505 /* Note: the check needs two bytes */
506 if ((io_offset
+ 1) > env
->tr
.limit
) {
509 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
511 mask
= (1 << size
) - 1;
512 /* all bits must be zero to allow the I/O */
513 if ((val
& mask
) != 0) {
515 raise_exception_err(env
, EXCP0D_GPF
, 0);
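
/*
 * Worked example (hypothetical values): with an I/O map base of 0x68
 * read from tr.base + 0x66, an access to port 0x3f8 of size 1 checks
 * io_offset = 0x68 + (0x3f8 >> 3) = 0xe7; the 16-bit load plus the
 * "val >>= (addr & 7)" shift let the (1 << size) - 1 mask work even
 * when the checked bits straddle a byte boundary.
 */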
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);           \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            ESP = (uint32_t)(val);                              \
        } else {                                                \
            ESP = (val);                                        \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask));         \
    } while (0)
#endif
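
/*
 * Note (added): sp_mask mirrors the stack-size attribute of SS.
 * With sp_mask == 0xffff only the low 16 bits of ESP are replaced
 * (16-bit stack); with 0xffffffff the value is zero-extended; on
 * x86_64 any other mask writes the full 64-bit RSP.
 */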
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                    \
    {                                                   \
        sp -= 2;                                        \
        stw_kernel((ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));        \
    }

#define POPW(ssp, sp, sp_mask, val)                     \
    {                                                   \
        val = lduw_kernel((ssp) + (sp & (sp_mask)));    \
        sp += 2;                                        \
    }

#define POPL(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                \
    }
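
/*
 * Usage sketch (added): the macros update a local copy of the stack
 * pointer, so callers build a whole frame first and commit it once,
 * e.g.:
 *
 *     esp = ESP;
 *     PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
 *     PUSHL(ssp, esp, sp_mask, old_eip);
 *     SET_ESP(esp, sp_mask);   // commit after all stores succeeded
 *
 * A fault inside a push therefore leaves the architectural ESP
 * unchanged and the instruction restartable.
 */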
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                stl_kernel(ssp, error_code);
            } else {
                stw_kernel(ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
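
/*
 * Frame sketch (added): for an inner-privilege 32-bit gate the code
 * above pushes, from higher to lower addresses on the new stack:
 * [GS FS DS ES when leaving vm86] SS, ESP, EFLAGS, CS, EIP and
 * optionally the error code.
 */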
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                  \
    {                                   \
        sp -= 8;                        \
        stq_kernel(sp, (val));          \
    }

#define POPQ(sp, val)                   \
    {                                   \
        val = ldq_kernel(sp);           \
        sp += 8;                        \
    }

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return ldq_kernel(env->tr.base + index);
}
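
/*
 * Note (added): in the 64-bit TSS, index = 8 * level + 4 addresses
 * RSP0/RSP1/RSP2 at offsets 0x04/0x0c/0x14; interrupt delivery passes
 * ist + 3, so IST1 is fetched from offset 0x24, matching the AMD64
 * TSS layout.
 */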
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = get_rsp_from_tss(dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif /* TARGET_X86_64 */
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
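
/*
 * Note (added): SYSCALL takes its selectors from the STAR MSR:
 * CS = STAR[47:32] (hence the (env->star >> 32) & 0xffff above) and
 * SS = CS + 8. The target RIP comes from LSTAR for 64-bit callers and
 * CSTAR for compatibility mode; legacy mode uses the low 32 bits of
 * STAR as EIP.
 */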
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
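
/*
 * Note (added): SYSRET mirrors SYSCALL using STAR[63:48] as a base:
 * a 64-bit return (dflag == 2) loads CS = (base + 16) | 3, a 32-bit
 * return loads CS = base | 3, and SS = base + 8 in both cases, which
 * is why one selector value feeds all the loads above.
 */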
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        EIP = next_eip;
    }
}

#else
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}
void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}
#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUX86State *env1)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for (i = 8; i < 16; i++) {
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    }
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for (i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
                              DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;

    env = saved_env;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for (i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) &
                                0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for (i = 8; i < 16; i++) {
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    }
    env->eip = ldq_phys(sm_state + 0x7f78);
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for (i = 0; i < 6; i++) {
        if (i < 3) {
            offset = 0x7f84 + i * 12;
        } else {
            offset = 0x7f2c + (i - 3) * 12;
        }
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
#endif /* !CONFIG_USER_ONLY */
void helper_into(int next_eip_addend)
{
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
)
1656 eflags
= helper_cc_compute_all(CC_OP
);
1658 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1659 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1662 /* always do the store */
1664 EDX
= (uint32_t)(d
>> 32);
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(env, EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(&e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
2123 /* protected mode call */
2124 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2125 int shift
, int next_eip_addend
)
2128 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2129 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2130 uint32_t val
, limit
, old_sp_mask
;
2131 target_ulong ssp
, old_ssp
, next_eip
;
2133 next_eip
= env
->eip
+ next_eip_addend
;
2134 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2135 LOG_PCALL_STATE(env
);
2136 if ((new_cs
& 0xfffc) == 0) {
2137 raise_exception_err(env
, EXCP0D_GPF
, 0);
2139 if (load_segment(&e1
, &e2
, new_cs
) != 0) {
2140 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2142 cpl
= env
->hflags
& HF_CPL_MASK
;
2143 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2144 if (e2
& DESC_S_MASK
) {
2145 if (!(e2
& DESC_CS_MASK
)) {
2146 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2148 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2149 if (e2
& DESC_C_MASK
) {
2150 /* conforming code segment */
2152 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2155 /* non conforming code segment */
2158 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2161 raise_exception_err(env
, EXCP0D_GPF
, new_cs
& 0xfffc);
2164 if (!(e2
& DESC_P_MASK
)) {
2165 raise_exception_err(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc);
2168 #ifdef TARGET_X86_64
2169 /* XXX: check 16/32 bit cases in long mode */
2175 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2176 PUSHQ(rsp
, next_eip
);
2177 /* from this point, not restartable */
2179 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2180 get_seg_base(e1
, e2
),
2181 get_seg_limit(e1
, e2
), e2
);
2187 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2188 ssp
= env
->segs
[R_SS
].base
;
2190 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2191 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2193 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2194 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2197 limit
= get_seg_limit(e1
, e2
);
2198 if (new_eip
> limit
) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP="
                      TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */
        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
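
/* In real and vm86 mode, IRET performs no privilege or descriptor
   checks: it simply pops IP, CS and FLAGS in that order (so for the
   16 bit form SS:SP points at IP, SS:SP+2 at CS and SS:SP+4 at FLAGS)
   and reloads the CS segment cache with base = selector << 4. */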
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
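
/* When returning to an outer privilege level, data segment registers
   still holding a descriptor whose DPL is lower than the new CPL must
   not remain usable; hardware loads a null selector in that case,
   which is what validate_seg() models below (modulo the FS/GS
   question raised in the XXX comment). */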
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
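
/* Common tail shared by protected mode LRET and IRET.  'shift' encodes
   the operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit); 'is_iret'
   selects whether EFLAGS is also popped; 'addend' is the immediate of
   "lret $imm", added to the stack pointer after the far pointer has
   been popped. */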
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
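
/* IRET with NT set is a task return: the link selector saved at offset
   0 of the current TSS identifies the task to resume.  This form is
   undefined in long mode, hence the #GP; the ordinary case simply
   delegates to helper_ret_protected() with is_iret = 1. */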
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
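
/* SYSENTER does not touch the GDT at all: CS and SS are forged as flat
   4GB segments from MSR_IA32_SYSENTER_CS (the SS selector is CS + 8),
   and the entry point comes from the SYSENTER_EIP/ESP MSRs. */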
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
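
/* SYSEXIT is the symmetric fast return: the ring 3 selectors are
   derived from SYSENTER_CS (+16/+24 in legacy mode, +32/+40 for the
   64 bit variant), the return EIP is taken from EDX and the user
   stack pointer from ECX. */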
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) &
                                           0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) &
                                           0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) &
                                           0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) &
                                           0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
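
/* Control and debug register accessors.  The user-only builds use
   stubs; the system mode versions raise the SVM intercept first, and
   CR8 accesses are forwarded to the APIC TPR unless the guest runs
   with a virtualized TPR (HF2_VINTR_MASK). */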
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
    switch (reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++) {
            hw_breakpoint_remove(env, i);
        }
        env->dr[7] = t0;
        for (i = 0; i < 4; i++) {
            hw_breakpoint_insert(env, i);
        }
    } else {
        env->dr[reg] = t0;
    }
}
#endif
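
/* LMSW is funnelled through helper_write_crN() below so that CR0
   writes done via LMSW still hit the SVM write-CR0 intercept. */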
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_invlpg(target_ulong addr)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(env, EXCP06_ILLOP, 0);
}
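
/* WRMSR/RDMSR: the MSR index is in ECX and the 64 bit value is split
   across EDX:EAX.  Note that accesses to unknown MSRs are currently
   ignored instead of raising #GP (see the XXX markers below). */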
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;

            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
                update_mask |= MSR_EFER_SCE;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
                update_mask |= MSR_EFER_LME;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
                update_mask |= MSR_EFER_NXE;
            }
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
                update_mask |= MSR_EFER_SVME;
            }
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
                update_mask |= MSR_EFER_FFXSR;
            }
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0)) {
            env->mcg_ctl = val;
        }
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 1
                || (val == 0 || val == ~(uint64_t)0)) {
                env->mce_banks[offset] = val;
            }
            break;
        }
        /* XXX: exception? */
        break;
    }
}
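
/* The read side mirrors helper_wrmsr(); MSRs that are not modelled
   read back as 0 rather than faulting. */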
void helper_rdmsr(void)
{
    uint64_t val;

    cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);

    switch ((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR) {
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
                MSR_MTRRcap_WC_SUPPORTED;
        } else {
            /* XXX: exception? */
            val = 0;
        }
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P) {
            val = env->mcg_ctl;
        } else {
            val = 0;
        }
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
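
/* LSL, LAR, VERR and VERW probe a descriptor without loading it.
   They report failure by clearing ZF instead of faulting, so the
   helpers compute the flags eagerly and patch CC_Z into CC_SRC. */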
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(&e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
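
/* Entry point for the user-only emulation code: loads a segment
   register either directly (real/vm86 mode) or through the full
   protected mode checks of helper_load_seg(). */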
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
#endif
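
/* HLT and MWAIT park the virtual CPU: env->halted is set and the
   execution loop is left with EXCP_HLT.  EIP is advanced first so the
   instruction is not re-executed when the CPU is woken up. */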
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    /* XXX: store address? */
    cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0) {
        raise_exception(env, EXCP0D_GPF);
    }
    cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(env, EXCP05_BOUND);
    }
}
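
/* Instantiate the softmmu load/store helpers once per access size
   (SHIFT 0..3 selects 1, 2, 4 or 8 byte accesses). */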
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    int ret;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        raise_exception_err(env, env->exception_index, env->error_code);