2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
40 #include "exec/cpu_ldst_useronly_template.h"
43 #include "exec/cpu_ldst_useronly_template.h"
46 #include "exec/cpu_ldst_useronly_template.h"
49 #include "exec/cpu_ldst_useronly_template.h"
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
55 #include "exec/cpu_ldst_template.h"
58 #include "exec/cpu_ldst_template.h"
61 #include "exec/cpu_ldst_template.h"
64 #include "exec/cpu_ldst_template.h"
69 /* return non zero if error */
70 static inline int load_segment_ra(CPUX86State
*env
, uint32_t *e1_ptr
,
71 uint32_t *e2_ptr
, int selector
,
83 index
= selector
& ~7;
84 if ((index
+ 7) > dt
->limit
) {
87 ptr
= dt
->base
+ index
;
88 *e1_ptr
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
89 *e2_ptr
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
93 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
94 uint32_t *e2_ptr
, int selector
)
96 return load_segment_ra(env
, e1_ptr
, e2_ptr
, selector
, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
103 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
104 if (e2
& DESC_G_MASK
) {
105 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor
 * words e1 (bits 31..16) and e2 (bits 7..0 and 31..24). */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
115 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
118 sc
->base
= get_seg_base(e1
, e2
);
119 sc
->limit
= get_seg_limit(e1
, e2
);
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
128 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
129 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
130 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
133 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
134 uint32_t *esp_ptr
, int dpl
,
137 X86CPU
*cpu
= x86_env_get_cpu(env
);
138 int type
, index
, shift
;
143 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
144 for (i
= 0; i
< env
->tr
.limit
; i
++) {
145 printf("%02x ", env
->tr
.base
[i
]);
154 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
155 cpu_abort(CPU(cpu
), "invalid tss");
157 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
158 if ((type
& 7) != 1) {
159 cpu_abort(CPU(cpu
), "invalid tss type");
162 index
= (dpl
* 4 + 2) << shift
;
163 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
164 raise_exception_err_ra(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc, retaddr
);
167 *esp_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
168 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 2, retaddr
);
170 *esp_ptr
= cpu_ldl_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
171 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 4, retaddr
);
175 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
,
181 if ((selector
& 0xfffc) != 0) {
182 if (load_segment_ra(env
, &e1
, &e2
, selector
, retaddr
) != 0) {
183 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
185 if (!(e2
& DESC_S_MASK
)) {
186 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
189 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
190 if (seg_reg
== R_CS
) {
191 if (!(e2
& DESC_CS_MASK
)) {
192 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
195 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
197 } else if (seg_reg
== R_SS
) {
198 /* SS must be writable data */
199 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
200 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
202 if (dpl
!= cpl
|| dpl
!= rpl
) {
203 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
206 /* not readable code */
207 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
208 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
210 /* if data or non conforming code, checks the rights */
211 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
212 if (dpl
< cpl
|| dpl
< rpl
) {
213 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
217 if (!(e2
& DESC_P_MASK
)) {
218 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, retaddr
);
220 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
221 get_seg_base(e1
, e2
),
222 get_seg_limit(e1
, e2
),
225 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
226 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
/* Task-switch sources, passed to switch_tss()/switch_tss_ra() to select
   the busy-bit and NT-flag handling for jmp/iret/call task transitions. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State
*env
, int tss_selector
,
237 uint32_t e1
, uint32_t e2
, int source
,
238 uint32_t next_eip
, uintptr_t retaddr
)
240 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
241 target_ulong tss_base
;
242 uint32_t new_regs
[8], new_segs
[6];
243 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
244 uint32_t old_eflags
, eflags_mask
;
249 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
253 /* if task gate, we read the TSS segment and we load it */
255 if (!(e2
& DESC_P_MASK
)) {
256 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
258 tss_selector
= e1
>> 16;
259 if (tss_selector
& 4) {
260 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
262 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, retaddr
) != 0) {
263 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
265 if (e2
& DESC_S_MASK
) {
266 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
268 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
269 if ((type
& 7) != 1) {
270 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
274 if (!(e2
& DESC_P_MASK
)) {
275 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
283 tss_limit
= get_seg_limit(e1
, e2
);
284 tss_base
= get_seg_base(e1
, e2
);
285 if ((tss_selector
& 4) != 0 ||
286 tss_limit
< tss_limit_max
) {
287 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
289 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
291 old_tss_limit_max
= 103;
293 old_tss_limit_max
= 43;
296 /* read all the registers from the new TSS */
299 new_cr3
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x1c, retaddr
);
300 new_eip
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x20, retaddr
);
301 new_eflags
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x24, retaddr
);
302 for (i
= 0; i
< 8; i
++) {
303 new_regs
[i
] = cpu_ldl_kernel_ra(env
, tss_base
+ (0x28 + i
* 4),
306 for (i
= 0; i
< 6; i
++) {
307 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x48 + i
* 4),
310 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x60, retaddr
);
311 new_trap
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x64, retaddr
);
315 new_eip
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x0e, retaddr
);
316 new_eflags
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x10, retaddr
);
317 for (i
= 0; i
< 8; i
++) {
318 new_regs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x12 + i
* 2),
319 retaddr
) | 0xffff0000;
321 for (i
= 0; i
< 4; i
++) {
322 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x22 + i
* 4),
325 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x2a, retaddr
);
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses before */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to valid the TLB after having done the accesses */
340 v1
= cpu_ldub_kernel_ra(env
, env
->tr
.base
, retaddr
);
341 v2
= cpu_ldub_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, retaddr
);
342 cpu_stb_kernel_ra(env
, env
->tr
.base
, v1
, retaddr
);
343 cpu_stb_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, v2
, retaddr
);
345 /* clear busy bit (it is restartable) */
346 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
350 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
351 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
352 e2
&= ~DESC_TSS_BUSY_MASK
;
353 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
355 old_eflags
= cpu_compute_eflags(env
);
356 if (source
== SWITCH_TSS_IRET
) {
357 old_eflags
&= ~NT_MASK
;
360 /* save the current state in the old TSS */
363 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x20, next_eip
, retaddr
);
364 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x24, old_eflags
, retaddr
);
365 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
], retaddr
);
366 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
], retaddr
);
367 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
], retaddr
);
368 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
], retaddr
);
369 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
], retaddr
);
370 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
], retaddr
);
371 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
], retaddr
);
372 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
], retaddr
);
373 for (i
= 0; i
< 6; i
++) {
374 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x48 + i
* 4),
375 env
->segs
[i
].selector
, retaddr
);
379 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x0e, next_eip
, retaddr
);
380 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x10, old_eflags
, retaddr
);
381 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
], retaddr
);
382 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
], retaddr
);
383 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
], retaddr
);
384 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
], retaddr
);
385 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
], retaddr
);
386 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
], retaddr
);
387 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
], retaddr
);
388 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
], retaddr
);
389 for (i
= 0; i
< 4; i
++) {
390 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x22 + i
* 4),
391 env
->segs
[i
].selector
, retaddr
);
395 /* now if an exception occurs, it will occurs in the next task
398 if (source
== SWITCH_TSS_CALL
) {
399 cpu_stw_kernel_ra(env
, tss_base
, env
->tr
.selector
, retaddr
);
400 new_eflags
|= NT_MASK
;
404 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
408 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
409 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
410 e2
|= DESC_TSS_BUSY_MASK
;
411 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env
->cr
[0] |= CR0_TS_MASK
;
417 env
->hflags
|= HF_TS_MASK
;
418 env
->tr
.selector
= tss_selector
;
419 env
->tr
.base
= tss_base
;
420 env
->tr
.limit
= tss_limit
;
421 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
423 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
424 cpu_x86_update_cr3(env
, new_cr3
);
427 /* load all registers without an exception, then reload them with
428 possible exception */
430 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
431 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
433 eflags_mask
&= 0xffff;
435 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
436 /* XXX: what to do in 16 bit case? */
437 env
->regs
[R_EAX
] = new_regs
[0];
438 env
->regs
[R_ECX
] = new_regs
[1];
439 env
->regs
[R_EDX
] = new_regs
[2];
440 env
->regs
[R_EBX
] = new_regs
[3];
441 env
->regs
[R_ESP
] = new_regs
[4];
442 env
->regs
[R_EBP
] = new_regs
[5];
443 env
->regs
[R_ESI
] = new_regs
[6];
444 env
->regs
[R_EDI
] = new_regs
[7];
445 if (new_eflags
& VM_MASK
) {
446 for (i
= 0; i
< 6; i
++) {
447 load_seg_vm(env
, i
, new_segs
[i
]);
450 /* first just selectors as the rest may trigger exceptions */
451 for (i
= 0; i
< 6; i
++) {
452 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
456 env
->ldt
.selector
= new_ldt
& ~4;
463 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
466 if ((new_ldt
& 0xfffc) != 0) {
468 index
= new_ldt
& ~7;
469 if ((index
+ 7) > dt
->limit
) {
470 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
472 ptr
= dt
->base
+ index
;
473 e1
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
474 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
475 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
476 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
478 if (!(e2
& DESC_P_MASK
)) {
479 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
481 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
484 /* load the segments */
485 if (!(new_eflags
& VM_MASK
)) {
486 int cpl
= new_segs
[R_CS
] & 3;
487 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
, retaddr
);
488 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
, retaddr
);
489 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
, retaddr
);
490 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
, retaddr
);
491 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
, retaddr
);
492 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
, retaddr
);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip
> env
->segs
[R_CS
].limit
) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
504 cpu_x86_update_dr7(env
, env
->dr
[7] & ~DR7_LOCAL_BP_MASK
);
509 static void switch_tss(CPUX86State
*env
, int tss_selector
,
510 uint32_t e1
, uint32_t e2
, int source
,
513 switch_tss_ra(env
, tss_selector
, e1
, e2
, source
, next_eip
, 0);
516 static inline unsigned int get_sp_mask(unsigned int e2
)
518 if (e2
& DESC_B_MASK
) {
/* Return 1 if exception vector @intno pushes an error code on the
 * stack (#DF, #TS, #NP, #SS, #GP, #PF, #AC), else 0. */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
/* Update ESP honoring the stack-segment size: in 64-bit builds a
   32-bit mask must zero-extend RSP, hence the special casing. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
594 /* protected mode interrupt */
595 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
596 int error_code
, unsigned int next_eip
,
600 target_ulong ptr
, ssp
;
601 int type
, dpl
, selector
, ss_dpl
, cpl
;
602 int has_error_code
, new_stack
, shift
;
603 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
604 uint32_t old_eip
, sp_mask
;
605 int vm86
= env
->eflags
& VM_MASK
;
608 if (!is_int
&& !is_hw
) {
609 has_error_code
= exception_has_error_code(intno
);
618 if (intno
* 8 + 7 > dt
->limit
) {
619 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
621 ptr
= dt
->base
+ intno
* 8;
622 e1
= cpu_ldl_kernel(env
, ptr
);
623 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
624 /* check gate type */
625 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
627 case 5: /* task gate */
628 /* must do that check here to return the correct error code */
629 if (!(e2
& DESC_P_MASK
)) {
630 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
632 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
633 if (has_error_code
) {
637 /* push the error code */
638 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
640 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
645 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
646 ssp
= env
->segs
[R_SS
].base
+ esp
;
648 cpu_stl_kernel(env
, ssp
, error_code
);
650 cpu_stw_kernel(env
, ssp
, error_code
);
655 case 6: /* 286 interrupt gate */
656 case 7: /* 286 trap gate */
657 case 14: /* 386 interrupt gate */
658 case 15: /* 386 trap gate */
661 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
664 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
665 cpl
= env
->hflags
& HF_CPL_MASK
;
666 /* check privilege if software int */
667 if (is_int
&& dpl
< cpl
) {
668 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
670 /* check valid bit */
671 if (!(e2
& DESC_P_MASK
)) {
672 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
675 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
676 if ((selector
& 0xfffc) == 0) {
677 raise_exception_err(env
, EXCP0D_GPF
, 0);
679 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
680 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
682 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
683 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
685 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
687 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
689 if (!(e2
& DESC_P_MASK
)) {
690 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
692 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
693 /* to inner privilege */
694 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
, 0);
695 if ((ss
& 0xfffc) == 0) {
696 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
698 if ((ss
& 3) != dpl
) {
699 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
701 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
702 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
704 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
706 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
708 if (!(ss_e2
& DESC_S_MASK
) ||
709 (ss_e2
& DESC_CS_MASK
) ||
710 !(ss_e2
& DESC_W_MASK
)) {
711 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
713 if (!(ss_e2
& DESC_P_MASK
)) {
714 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
717 sp_mask
= get_sp_mask(ss_e2
);
718 ssp
= get_seg_base(ss_e1
, ss_e2
);
719 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
720 /* to same privilege */
722 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
725 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
726 ssp
= env
->segs
[R_SS
].base
;
727 esp
= env
->regs
[R_ESP
];
730 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
731 new_stack
= 0; /* avoid warning */
732 sp_mask
= 0; /* avoid warning */
733 ssp
= 0; /* avoid warning */
734 esp
= 0; /* avoid warning */
740 /* XXX: check that enough room is available */
741 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
750 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
751 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
752 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
753 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
755 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
756 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
758 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
759 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
760 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
761 if (has_error_code
) {
762 PUSHL(ssp
, esp
, sp_mask
, error_code
);
767 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
768 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
769 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
770 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
772 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
773 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
775 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
776 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
777 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
778 if (has_error_code
) {
779 PUSHW(ssp
, esp
, sp_mask
, error_code
);
783 /* interrupt gate clear IF mask */
784 if ((type
& 1) == 0) {
785 env
->eflags
&= ~IF_MASK
;
787 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
791 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
792 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
793 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
796 ss
= (ss
& ~3) | dpl
;
797 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
798 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
800 SET_ESP(esp
, sp_mask
);
802 selector
= (selector
& ~3) | dpl
;
803 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
804 get_seg_base(e1
, e2
),
805 get_seg_limit(e1
, e2
),
/* 64-bit stack push/pop; the flat long-mode stack needs no segment
   base or mask, only the raw pointer. */
#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
827 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
829 X86CPU
*cpu
= x86_env_get_cpu(env
);
833 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
834 env
->tr
.base
, env
->tr
.limit
);
837 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
838 cpu_abort(CPU(cpu
), "invalid tss");
840 index
= 8 * level
+ 4;
841 if ((index
+ 7) > env
->tr
.limit
) {
842 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
844 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
847 /* 64 bit interrupt */
848 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
849 int error_code
, target_ulong next_eip
, int is_hw
)
853 int type
, dpl
, selector
, cpl
, ist
;
854 int has_error_code
, new_stack
;
855 uint32_t e1
, e2
, e3
, ss
;
856 target_ulong old_eip
, esp
, offset
;
859 if (!is_int
&& !is_hw
) {
860 has_error_code
= exception_has_error_code(intno
);
869 if (intno
* 16 + 15 > dt
->limit
) {
870 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
872 ptr
= dt
->base
+ intno
* 16;
873 e1
= cpu_ldl_kernel(env
, ptr
);
874 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
875 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
876 /* check gate type */
877 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
879 case 14: /* 386 interrupt gate */
880 case 15: /* 386 trap gate */
883 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
886 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
887 cpl
= env
->hflags
& HF_CPL_MASK
;
888 /* check privilege if software int */
889 if (is_int
&& dpl
< cpl
) {
890 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
892 /* check valid bit */
893 if (!(e2
& DESC_P_MASK
)) {
894 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
897 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
899 if ((selector
& 0xfffc) == 0) {
900 raise_exception_err(env
, EXCP0D_GPF
, 0);
903 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
904 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
906 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
907 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
909 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
911 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
913 if (!(e2
& DESC_P_MASK
)) {
914 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
916 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
917 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
919 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
920 /* to inner privilege */
922 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
924 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
925 /* to same privilege */
926 if (env
->eflags
& VM_MASK
) {
927 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
930 esp
= env
->regs
[R_ESP
];
933 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
934 new_stack
= 0; /* avoid warning */
935 esp
= 0; /* avoid warning */
937 esp
&= ~0xfLL
; /* align stack */
939 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
940 PUSHQ(esp
, env
->regs
[R_ESP
]);
941 PUSHQ(esp
, cpu_compute_eflags(env
));
942 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
944 if (has_error_code
) {
945 PUSHQ(esp
, error_code
);
948 /* interrupt gate clear IF mask */
949 if ((type
& 1) == 0) {
950 env
->eflags
&= ~IF_MASK
;
952 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
956 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
958 env
->regs
[R_ESP
] = esp
;
960 selector
= (selector
& ~3) | dpl
;
961 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
962 get_seg_base(e1
, e2
),
963 get_seg_limit(e1
, e2
),
970 #if defined(CONFIG_USER_ONLY)
971 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
973 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
975 cs
->exception_index
= EXCP_SYSCALL
;
976 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
980 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
984 if (!(env
->efer
& MSR_EFER_SCE
)) {
985 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
987 selector
= (env
->star
>> 32) & 0xffff;
988 if (env
->hflags
& HF_LMA_MASK
) {
991 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
992 env
->regs
[11] = cpu_compute_eflags(env
);
994 code64
= env
->hflags
& HF_CS64_MASK
;
996 env
->eflags
&= ~env
->fmask
;
997 cpu_load_eflags(env
, env
->eflags
, 0);
998 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1000 DESC_G_MASK
| DESC_P_MASK
|
1002 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1004 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1006 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1008 DESC_W_MASK
| DESC_A_MASK
);
1010 env
->eip
= env
->lstar
;
1012 env
->eip
= env
->cstar
;
1015 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
1017 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1018 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1020 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1022 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1023 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1025 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1027 DESC_W_MASK
| DESC_A_MASK
);
1028 env
->eip
= (uint32_t)env
->star
;
1034 #ifdef TARGET_X86_64
1035 void helper_sysret(CPUX86State
*env
, int dflag
)
1039 if (!(env
->efer
& MSR_EFER_SCE
)) {
1040 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
1042 cpl
= env
->hflags
& HF_CPL_MASK
;
1043 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1044 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1046 selector
= (env
->star
>> 48) & 0xffff;
1047 if (env
->hflags
& HF_LMA_MASK
) {
1048 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1049 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1052 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1054 DESC_G_MASK
| DESC_P_MASK
|
1055 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1056 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1058 env
->eip
= env
->regs
[R_ECX
];
1060 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1062 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1063 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1064 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1065 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1067 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1069 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1070 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1071 DESC_W_MASK
| DESC_A_MASK
);
1073 env
->eflags
|= IF_MASK
;
1074 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1076 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1077 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1078 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1079 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1080 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1082 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1083 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1084 DESC_W_MASK
| DESC_A_MASK
);
1089 /* real mode interrupt */
1090 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1091 int error_code
, unsigned int next_eip
)
1094 target_ulong ptr
, ssp
;
1096 uint32_t offset
, esp
;
1097 uint32_t old_cs
, old_eip
;
1099 /* real mode (simpler!) */
1101 if (intno
* 4 + 3 > dt
->limit
) {
1102 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1104 ptr
= dt
->base
+ intno
* 4;
1105 offset
= cpu_lduw_kernel(env
, ptr
);
1106 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1107 esp
= env
->regs
[R_ESP
];
1108 ssp
= env
->segs
[R_SS
].base
;
1114 old_cs
= env
->segs
[R_CS
].selector
;
1115 /* XXX: use SS segment size? */
1116 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1117 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1118 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1120 /* update processor state */
1121 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1123 env
->segs
[R_CS
].selector
= selector
;
1124 env
->segs
[R_CS
].base
= (selector
<< 4);
1125 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1128 #if defined(CONFIG_USER_ONLY)
1129 /* fake user mode interrupt */
1130 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1131 int error_code
, target_ulong next_eip
)
1135 int dpl
, cpl
, shift
;
1139 if (env
->hflags
& HF_LMA_MASK
) {
1144 ptr
= dt
->base
+ (intno
<< shift
);
1145 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1147 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1148 cpl
= env
->hflags
& HF_CPL_MASK
;
1149 /* check privilege if software int */
1150 if (is_int
&& dpl
< cpl
) {
1151 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1154 /* Since we emulate only user space, we cannot do more than
1155 exiting the emulation with the suitable exception and error
1156 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1157 if (is_int
|| intno
== EXCP_SYSCALL
) {
1158 env
->eip
= next_eip
;
1164 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1165 int error_code
, int is_hw
, int rm
)
1167 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1168 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1169 control
.event_inj
));
1171 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1175 type
= SVM_EVTINJ_TYPE_SOFT
;
1177 type
= SVM_EVTINJ_TYPE_EXEPT
;
1179 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1180 if (!rm
&& exception_has_error_code(intno
)) {
1181 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1182 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1183 control
.event_inj_err
),
1187 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1194 * Begin execution of an interruption. is_int is TRUE if coming from
1195 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1196 * instruction. It is only relevant if is_int is TRUE.
1198 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1199 int error_code
, target_ulong next_eip
, int is_hw
)
1201 CPUX86State
*env
= &cpu
->env
;
1203 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1204 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1207 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1208 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1209 count
, intno
, error_code
, is_int
,
1210 env
->hflags
& HF_CPL_MASK
,
1211 env
->segs
[R_CS
].selector
, env
->eip
,
1212 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1213 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1214 if (intno
== 0x0e) {
1215 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1217 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1220 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1227 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1228 for (i
= 0; i
< 16; i
++) {
1229 qemu_log(" %02x", ldub(ptr
+ i
));
1237 if (env
->cr
[0] & CR0_PE_MASK
) {
1238 #if !defined(CONFIG_USER_ONLY)
1239 if (env
->hflags
& HF_SVMI_MASK
) {
1240 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1243 #ifdef TARGET_X86_64
1244 if (env
->hflags
& HF_LMA_MASK
) {
1245 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1249 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1253 #if !defined(CONFIG_USER_ONLY)
1254 if (env
->hflags
& HF_SVMI_MASK
) {
1255 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1258 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1261 #if !defined(CONFIG_USER_ONLY)
1262 if (env
->hflags
& HF_SVMI_MASK
) {
1263 CPUState
*cs
= CPU(cpu
);
1264 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+
1265 offsetof(struct vmcb
,
1266 control
.event_inj
));
1269 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1270 event_inj
& ~SVM_EVTINJ_VALID
);
1275 void x86_cpu_do_interrupt(CPUState
*cs
)
1277 X86CPU
*cpu
= X86_CPU(cs
);
1278 CPUX86State
*env
= &cpu
->env
;
1280 #if defined(CONFIG_USER_ONLY)
1281 /* if user mode only, we simulate a fake exception
1282 which will be handled outside the cpu execution
1284 do_interrupt_user(env
, cs
->exception_index
,
1285 env
->exception_is_int
,
1287 env
->exception_next_eip
);
1288 /* successfully delivered */
1289 env
->old_exception
= -1;
1291 /* simulate a real cpu exception. On i386, it can
1292 trigger new exceptions, but we do not handle
1293 double or triple faults yet. */
1294 do_interrupt_all(cpu
, cs
->exception_index
,
1295 env
->exception_is_int
,
1297 env
->exception_next_eip
, 0);
1298 /* successfully delivered */
1299 env
->old_exception
= -1;
1303 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1305 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1308 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1310 X86CPU
*cpu
= X86_CPU(cs
);
1311 CPUX86State
*env
= &cpu
->env
;
1314 #if !defined(CONFIG_USER_ONLY)
1315 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1316 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1317 apic_poll_irq(cpu
->apic_state
);
1318 /* Don't process multiple interrupt requests in a single call.
1319 This is required to make icount-driven execution deterministic. */
1323 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1325 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1326 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1327 !(env
->hflags
& HF_SMM_MASK
)) {
1328 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1329 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1332 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1333 !(env
->hflags2
& HF2_NMI_MASK
)) {
1334 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1335 env
->hflags2
|= HF2_NMI_MASK
;
1336 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1338 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1339 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1340 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1342 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1343 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1344 (env
->hflags2
& HF2_HIF_MASK
)) ||
1345 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1346 (env
->eflags
& IF_MASK
&&
1347 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1349 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1350 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1351 CPU_INTERRUPT_VIRQ
);
1352 intno
= cpu_get_pic_interrupt(env
);
1353 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1354 "Servicing hardware INT=0x%02x\n", intno
);
1355 do_interrupt_x86_hardirq(env
, intno
, 1);
1356 /* ensure that no TB jump will be modified as
1357 the program flow was changed */
1359 #if !defined(CONFIG_USER_ONLY)
1360 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1361 (env
->eflags
& IF_MASK
) &&
1362 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1364 /* FIXME: this should respect TPR */
1365 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1366 intno
= x86_ldl_phys(cs
, env
->vm_vmcb
1367 + offsetof(struct vmcb
, control
.int_vector
));
1368 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1369 "Servicing virtual hardware INT=0x%02x\n", intno
);
1370 do_interrupt_x86_hardirq(env
, intno
, 1);
1371 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1380 void helper_enter_level(CPUX86State
*env
, int level
, int data32
,
1384 uint32_t esp_mask
, esp
, ebp
;
1386 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1387 ssp
= env
->segs
[R_SS
].base
;
1388 ebp
= env
->regs
[R_EBP
];
1389 esp
= env
->regs
[R_ESP
];
1396 cpu_stl_data_ra(env
, ssp
+ (esp
& esp_mask
),
1397 cpu_ldl_data_ra(env
, ssp
+ (ebp
& esp_mask
),
1402 cpu_stl_data_ra(env
, ssp
+ (esp
& esp_mask
), t1
, GETPC());
1409 cpu_stw_data_ra(env
, ssp
+ (esp
& esp_mask
),
1410 cpu_lduw_data_ra(env
, ssp
+ (ebp
& esp_mask
),
1415 cpu_stw_data_ra(env
, ssp
+ (esp
& esp_mask
), t1
, GETPC());
1419 #ifdef TARGET_X86_64
1420 void helper_enter64_level(CPUX86State
*env
, int level
, int data64
,
1423 target_ulong esp
, ebp
;
1425 ebp
= env
->regs
[R_EBP
];
1426 esp
= env
->regs
[R_ESP
];
1434 cpu_stq_data_ra(env
, esp
, cpu_ldq_data_ra(env
, ebp
, GETPC()),
1438 cpu_stq_data_ra(env
, esp
, t1
, GETPC());
1445 cpu_stw_data_ra(env
, esp
, cpu_lduw_data_ra(env
, ebp
, GETPC()),
1449 cpu_stw_data_ra(env
, esp
, t1
, GETPC());
1454 void helper_lldt(CPUX86State
*env
, int selector
)
1458 int index
, entry_limit
;
1462 if ((selector
& 0xfffc) == 0) {
1463 /* XXX: NULL selector case: invalid LDT */
1467 if (selector
& 0x4) {
1468 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1471 index
= selector
& ~7;
1472 #ifdef TARGET_X86_64
1473 if (env
->hflags
& HF_LMA_MASK
) {
1480 if ((index
+ entry_limit
) > dt
->limit
) {
1481 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1483 ptr
= dt
->base
+ index
;
1484 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1485 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1486 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1487 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1489 if (!(e2
& DESC_P_MASK
)) {
1490 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1492 #ifdef TARGET_X86_64
1493 if (env
->hflags
& HF_LMA_MASK
) {
1496 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1497 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1498 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1502 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1505 env
->ldt
.selector
= selector
;
1508 void helper_ltr(CPUX86State
*env
, int selector
)
1512 int index
, type
, entry_limit
;
1516 if ((selector
& 0xfffc) == 0) {
1517 /* NULL selector case: invalid TR */
1522 if (selector
& 0x4) {
1523 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1526 index
= selector
& ~7;
1527 #ifdef TARGET_X86_64
1528 if (env
->hflags
& HF_LMA_MASK
) {
1535 if ((index
+ entry_limit
) > dt
->limit
) {
1536 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1538 ptr
= dt
->base
+ index
;
1539 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1540 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1541 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1542 if ((e2
& DESC_S_MASK
) ||
1543 (type
!= 1 && type
!= 9)) {
1544 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1546 if (!(e2
& DESC_P_MASK
)) {
1547 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1549 #ifdef TARGET_X86_64
1550 if (env
->hflags
& HF_LMA_MASK
) {
1553 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1554 e4
= cpu_ldl_kernel_ra(env
, ptr
+ 12, GETPC());
1555 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1556 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1558 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1559 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1563 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1565 e2
|= DESC_TSS_BUSY_MASK
;
1566 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1568 env
->tr
.selector
= selector
;
1571 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1572 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1581 cpl
= env
->hflags
& HF_CPL_MASK
;
1582 if ((selector
& 0xfffc) == 0) {
1583 /* null selector case */
1585 #ifdef TARGET_X86_64
1586 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1589 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1591 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1594 if (selector
& 0x4) {
1599 index
= selector
& ~7;
1600 if ((index
+ 7) > dt
->limit
) {
1601 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1603 ptr
= dt
->base
+ index
;
1604 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1605 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1607 if (!(e2
& DESC_S_MASK
)) {
1608 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1611 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1612 if (seg_reg
== R_SS
) {
1613 /* must be writable segment */
1614 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1615 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1617 if (rpl
!= cpl
|| dpl
!= cpl
) {
1618 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1621 /* must be readable segment */
1622 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1623 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1626 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1627 /* if not conforming code, test rights */
1628 if (dpl
< cpl
|| dpl
< rpl
) {
1629 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1634 if (!(e2
& DESC_P_MASK
)) {
1635 if (seg_reg
== R_SS
) {
1636 raise_exception_err_ra(env
, EXCP0C_STACK
, selector
& 0xfffc, GETPC());
1638 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1642 /* set the access bit if not already set */
1643 if (!(e2
& DESC_A_MASK
)) {
1645 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1648 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1649 get_seg_base(e1
, e2
),
1650 get_seg_limit(e1
, e2
),
1653 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1654 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1659 /* protected mode jump */
1660 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1661 target_ulong next_eip
)
1664 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1666 if ((new_cs
& 0xfffc) == 0) {
1667 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1669 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1670 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1672 cpl
= env
->hflags
& HF_CPL_MASK
;
1673 if (e2
& DESC_S_MASK
) {
1674 if (!(e2
& DESC_CS_MASK
)) {
1675 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1677 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1678 if (e2
& DESC_C_MASK
) {
1679 /* conforming code segment */
1681 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1684 /* non conforming code segment */
1687 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1690 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1693 if (!(e2
& DESC_P_MASK
)) {
1694 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1696 limit
= get_seg_limit(e1
, e2
);
1697 if (new_eip
> limit
&&
1698 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1699 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1701 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1702 get_seg_base(e1
, e2
), limit
, e2
);
1705 /* jump to call or task gate */
1706 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1708 cpl
= env
->hflags
& HF_CPL_MASK
;
1709 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1711 case 1: /* 286 TSS */
1712 case 9: /* 386 TSS */
1713 case 5: /* task gate */
1714 if (dpl
< cpl
|| dpl
< rpl
) {
1715 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1717 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
, GETPC());
1719 case 4: /* 286 call gate */
1720 case 12: /* 386 call gate */
1721 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1722 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1724 if (!(e2
& DESC_P_MASK
)) {
1725 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1728 new_eip
= (e1
& 0xffff);
1730 new_eip
|= (e2
& 0xffff0000);
1732 if (load_segment_ra(env
, &e1
, &e2
, gate_cs
, GETPC()) != 0) {
1733 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1735 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1736 /* must be code segment */
1737 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1738 (DESC_S_MASK
| DESC_CS_MASK
))) {
1739 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1741 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1742 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1743 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1745 if (!(e2
& DESC_P_MASK
)) {
1746 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1748 limit
= get_seg_limit(e1
, e2
);
1749 if (new_eip
> limit
) {
1750 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1752 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1753 get_seg_base(e1
, e2
), limit
, e2
);
1757 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1763 /* real mode call */
1764 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1765 int shift
, int next_eip
)
1768 uint32_t esp
, esp_mask
;
1772 esp
= env
->regs
[R_ESP
];
1773 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1774 ssp
= env
->segs
[R_SS
].base
;
1776 PUSHL_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1777 PUSHL_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1779 PUSHW_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1780 PUSHW_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1783 SET_ESP(esp
, esp_mask
);
1785 env
->segs
[R_CS
].selector
= new_cs
;
1786 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1789 /* protected mode call */
1790 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1791 int shift
, target_ulong next_eip
)
1794 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1795 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1796 uint32_t val
, limit
, old_sp_mask
;
1797 target_ulong ssp
, old_ssp
;
1799 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1800 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1801 if ((new_cs
& 0xfffc) == 0) {
1802 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1804 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1805 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1807 cpl
= env
->hflags
& HF_CPL_MASK
;
1808 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1809 if (e2
& DESC_S_MASK
) {
1810 if (!(e2
& DESC_CS_MASK
)) {
1811 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1813 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1814 if (e2
& DESC_C_MASK
) {
1815 /* conforming code segment */
1817 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1820 /* non conforming code segment */
1823 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1826 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1829 if (!(e2
& DESC_P_MASK
)) {
1830 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1833 #ifdef TARGET_X86_64
1834 /* XXX: check 16/32 bit cases in long mode */
1839 rsp
= env
->regs
[R_ESP
];
1840 PUSHQ_RA(rsp
, env
->segs
[R_CS
].selector
, GETPC());
1841 PUSHQ_RA(rsp
, next_eip
, GETPC());
1842 /* from this point, not restartable */
1843 env
->regs
[R_ESP
] = rsp
;
1844 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1845 get_seg_base(e1
, e2
),
1846 get_seg_limit(e1
, e2
), e2
);
1851 sp
= env
->regs
[R_ESP
];
1852 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1853 ssp
= env
->segs
[R_SS
].base
;
1855 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1856 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1858 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1859 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1862 limit
= get_seg_limit(e1
, e2
);
1863 if (new_eip
> limit
) {
1864 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1866 /* from this point, not restartable */
1867 SET_ESP(sp
, sp_mask
);
1868 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1869 get_seg_base(e1
, e2
), limit
, e2
);
1873 /* check gate type */
1874 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1875 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1878 case 1: /* available 286 TSS */
1879 case 9: /* available 386 TSS */
1880 case 5: /* task gate */
1881 if (dpl
< cpl
|| dpl
< rpl
) {
1882 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1884 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
, GETPC());
1886 case 4: /* 286 call gate */
1887 case 12: /* 386 call gate */
1890 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1895 if (dpl
< cpl
|| dpl
< rpl
) {
1896 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1898 /* check valid bit */
1899 if (!(e2
& DESC_P_MASK
)) {
1900 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1902 selector
= e1
>> 16;
1903 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1904 param_count
= e2
& 0x1f;
1905 if ((selector
& 0xfffc) == 0) {
1906 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1909 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
1910 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1912 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1913 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1915 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1917 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1919 if (!(e2
& DESC_P_MASK
)) {
1920 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1923 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1924 /* to inner privilege */
1925 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
, GETPC());
1926 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1927 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1929 if ((ss
& 0xfffc) == 0) {
1930 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1932 if ((ss
& 3) != dpl
) {
1933 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1935 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, ss
, GETPC()) != 0) {
1936 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1938 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1939 if (ss_dpl
!= dpl
) {
1940 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1942 if (!(ss_e2
& DESC_S_MASK
) ||
1943 (ss_e2
& DESC_CS_MASK
) ||
1944 !(ss_e2
& DESC_W_MASK
)) {
1945 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1947 if (!(ss_e2
& DESC_P_MASK
)) {
1948 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1951 /* push_size = ((param_count * 2) + 8) << shift; */
1953 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1954 old_ssp
= env
->segs
[R_SS
].base
;
1956 sp_mask
= get_sp_mask(ss_e2
);
1957 ssp
= get_seg_base(ss_e1
, ss_e2
);
1959 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1960 PUSHL_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1961 for (i
= param_count
- 1; i
>= 0; i
--) {
1962 val
= cpu_ldl_kernel_ra(env
, old_ssp
+
1963 ((env
->regs
[R_ESP
] + i
* 4) &
1964 old_sp_mask
), GETPC());
1965 PUSHL_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1968 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1969 PUSHW_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1970 for (i
= param_count
- 1; i
>= 0; i
--) {
1971 val
= cpu_lduw_kernel_ra(env
, old_ssp
+
1972 ((env
->regs
[R_ESP
] + i
* 2) &
1973 old_sp_mask
), GETPC());
1974 PUSHW_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1979 /* to same privilege */
1980 sp
= env
->regs
[R_ESP
];
1981 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1982 ssp
= env
->segs
[R_SS
].base
;
1983 /* push_size = (4 << shift); */
1988 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1989 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1991 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1992 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1995 /* from this point, not restartable */
1998 ss
= (ss
& ~3) | dpl
;
1999 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2001 get_seg_limit(ss_e1
, ss_e2
),
2005 selector
= (selector
& ~3) | dpl
;
2006 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2007 get_seg_base(e1
, e2
),
2008 get_seg_limit(e1
, e2
),
2010 SET_ESP(sp
, sp_mask
);
2015 /* real and vm86 mode iret */
2016 void helper_iret_real(CPUX86State
*env
, int shift
)
2018 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2022 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
2023 sp
= env
->regs
[R_ESP
];
2024 ssp
= env
->segs
[R_SS
].base
;
2027 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
2028 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
2030 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
2033 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
2034 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
2035 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
2037 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
2038 env
->segs
[R_CS
].selector
= new_cs
;
2039 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2041 if (env
->eflags
& VM_MASK
) {
2042 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
2045 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
2049 eflags_mask
&= 0xffff;
2051 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2052 env
->hflags2
&= ~HF2_NMI_MASK
;
2055 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
2060 /* XXX: on x86_64, we do not want to nullify FS and GS because
2061 they may still contain a valid base. I would be interested to
2062 know how a real x86_64 CPU behaves */
2063 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2064 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2068 e2
= env
->segs
[seg_reg
].flags
;
2069 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2070 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2071 /* data or non conforming code segment */
2073 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2078 /* protected mode iret */
2079 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2080 int is_iret
, int addend
,
2083 uint32_t new_cs
, new_eflags
, new_ss
;
2084 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2085 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2086 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2087 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2089 #ifdef TARGET_X86_64
2095 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2097 sp
= env
->regs
[R_ESP
];
2098 ssp
= env
->segs
[R_SS
].base
;
2099 new_eflags
= 0; /* avoid warning */
2100 #ifdef TARGET_X86_64
2102 POPQ_RA(sp
, new_eip
, retaddr
);
2103 POPQ_RA(sp
, new_cs
, retaddr
);
2106 POPQ_RA(sp
, new_eflags
, retaddr
);
2113 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2114 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2117 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2118 if (new_eflags
& VM_MASK
) {
2119 goto return_to_vm86
;
2124 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2125 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2127 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2131 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2132 new_cs
, new_eip
, shift
, addend
);
2133 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2134 if ((new_cs
& 0xfffc) == 0) {
2135 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2137 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, retaddr
) != 0) {
2138 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2140 if (!(e2
& DESC_S_MASK
) ||
2141 !(e2
& DESC_CS_MASK
)) {
2142 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2144 cpl
= env
->hflags
& HF_CPL_MASK
;
2147 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2149 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2150 if (e2
& DESC_C_MASK
) {
2152 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2156 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2159 if (!(e2
& DESC_P_MASK
)) {
2160 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, retaddr
);
2164 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2165 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2166 /* return to same privilege level */
2167 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2168 get_seg_base(e1
, e2
),
2169 get_seg_limit(e1
, e2
),
2172 /* return to different privilege level */
2173 #ifdef TARGET_X86_64
2175 POPQ_RA(sp
, new_esp
, retaddr
);
2176 POPQ_RA(sp
, new_ss
, retaddr
);
2183 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2184 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2188 POPW_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2189 POPW_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2192 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2194 if ((new_ss
& 0xfffc) == 0) {
2195 #ifdef TARGET_X86_64
2196 /* NULL ss is allowed in long mode if cpl != 3 */
2197 /* XXX: test CS64? */
2198 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2199 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2201 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2202 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2203 DESC_W_MASK
| DESC_A_MASK
);
2204 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2208 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2211 if ((new_ss
& 3) != rpl
) {
2212 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2214 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, new_ss
, retaddr
) != 0) {
2215 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2217 if (!(ss_e2
& DESC_S_MASK
) ||
2218 (ss_e2
& DESC_CS_MASK
) ||
2219 !(ss_e2
& DESC_W_MASK
)) {
2220 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2222 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2224 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2226 if (!(ss_e2
& DESC_P_MASK
)) {
2227 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc, retaddr
);
2229 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2230 get_seg_base(ss_e1
, ss_e2
),
2231 get_seg_limit(ss_e1
, ss_e2
),
2235 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2236 get_seg_base(e1
, e2
),
2237 get_seg_limit(e1
, e2
),
2240 #ifdef TARGET_X86_64
2241 if (env
->hflags
& HF_CS64_MASK
) {
2246 sp_mask
= get_sp_mask(ss_e2
);
2249 /* validate data segments */
2250 validate_seg(env
, R_ES
, rpl
);
2251 validate_seg(env
, R_DS
, rpl
);
2252 validate_seg(env
, R_FS
, rpl
);
2253 validate_seg(env
, R_GS
, rpl
);
2257 SET_ESP(sp
, sp_mask
);
2260 /* NOTE: 'cpl' is the _old_ CPL */
2261 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2263 eflags_mask
|= IOPL_MASK
;
2265 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2267 eflags_mask
|= IF_MASK
;
2270 eflags_mask
&= 0xffff;
2272 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2277 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2278 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2279 POPL_RA(ssp
, sp
, sp_mask
, new_es
, retaddr
);
2280 POPL_RA(ssp
, sp
, sp_mask
, new_ds
, retaddr
);
2281 POPL_RA(ssp
, sp
, sp_mask
, new_fs
, retaddr
);
2282 POPL_RA(ssp
, sp
, sp_mask
, new_gs
, retaddr
);
2284 /* modify processor state */
2285 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2286 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2288 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2289 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2290 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2291 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2292 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2293 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2295 env
->eip
= new_eip
& 0xffff;
2296 env
->regs
[R_ESP
] = new_esp
;
2299 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2301 int tss_selector
, type
;
2304 /* specific case for TSS */
2305 if (env
->eflags
& NT_MASK
) {
2306 #ifdef TARGET_X86_64
2307 if (env
->hflags
& HF_LMA_MASK
) {
2308 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2311 tss_selector
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0, GETPC());
2312 if (tss_selector
& 4) {
2313 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2315 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, GETPC()) != 0) {
2316 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2318 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2319 /* NOTE: we check both segment and busy TSS */
2321 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2323 switch_tss_ra(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
, GETPC());
2325 helper_ret_protected(env
, shift
, 1, 0, GETPC());
2327 env
->hflags2
&= ~HF2_NMI_MASK
;
2330 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2332 helper_ret_protected(env
, shift
, 0, addend
, GETPC());
2335 void helper_sysenter(CPUX86State
*env
)
2337 if (env
->sysenter_cs
== 0) {
2338 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2340 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2342 #ifdef TARGET_X86_64
2343 if (env
->hflags
& HF_LMA_MASK
) {
2344 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2346 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2348 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2353 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2355 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2357 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2359 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2361 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2363 DESC_W_MASK
| DESC_A_MASK
);
2364 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2365 env
->eip
= env
->sysenter_eip
;
2368 void helper_sysexit(CPUX86State
*env
, int dflag
)
2372 cpl
= env
->hflags
& HF_CPL_MASK
;
2373 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2374 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2376 #ifdef TARGET_X86_64
2378 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2380 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2381 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2382 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2384 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2386 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2387 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2388 DESC_W_MASK
| DESC_A_MASK
);
2392 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2394 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2395 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2396 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2397 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2399 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2400 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2401 DESC_W_MASK
| DESC_A_MASK
);
2403 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2404 env
->eip
= env
->regs
[R_EDX
];
2407 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2410 uint32_t e1
, e2
, eflags
, selector
;
2411 int rpl
, dpl
, cpl
, type
;
2413 selector
= selector1
& 0xffff;
2414 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2415 if ((selector
& 0xfffc) == 0) {
2418 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2422 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2423 cpl
= env
->hflags
& HF_CPL_MASK
;
2424 if (e2
& DESC_S_MASK
) {
2425 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2428 if (dpl
< cpl
|| dpl
< rpl
) {
2433 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2444 if (dpl
< cpl
|| dpl
< rpl
) {
2446 CC_SRC
= eflags
& ~CC_Z
;
2450 limit
= get_seg_limit(e1
, e2
);
2451 CC_SRC
= eflags
| CC_Z
;
2455 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2457 uint32_t e1
, e2
, eflags
, selector
;
2458 int rpl
, dpl
, cpl
, type
;
2460 selector
= selector1
& 0xffff;
2461 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2462 if ((selector
& 0xfffc) == 0) {
2465 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2469 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2470 cpl
= env
->hflags
& HF_CPL_MASK
;
2471 if (e2
& DESC_S_MASK
) {
2472 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2475 if (dpl
< cpl
|| dpl
< rpl
) {
2480 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2494 if (dpl
< cpl
|| dpl
< rpl
) {
2496 CC_SRC
= eflags
& ~CC_Z
;
2500 CC_SRC
= eflags
| CC_Z
;
2501 return e2
& 0x00f0ff00;
2504 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2506 uint32_t e1
, e2
, eflags
, selector
;
2509 selector
= selector1
& 0xffff;
2510 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2511 if ((selector
& 0xfffc) == 0) {
2514 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2517 if (!(e2
& DESC_S_MASK
)) {
2521 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2522 cpl
= env
->hflags
& HF_CPL_MASK
;
2523 if (e2
& DESC_CS_MASK
) {
2524 if (!(e2
& DESC_R_MASK
)) {
2527 if (!(e2
& DESC_C_MASK
)) {
2528 if (dpl
< cpl
|| dpl
< rpl
) {
2533 if (dpl
< cpl
|| dpl
< rpl
) {
2535 CC_SRC
= eflags
& ~CC_Z
;
2539 CC_SRC
= eflags
| CC_Z
;
2542 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2544 uint32_t e1
, e2
, eflags
, selector
;
2547 selector
= selector1
& 0xffff;
2548 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2549 if ((selector
& 0xfffc) == 0) {
2552 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2555 if (!(e2
& DESC_S_MASK
)) {
2559 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2560 cpl
= env
->hflags
& HF_CPL_MASK
;
2561 if (e2
& DESC_CS_MASK
) {
2564 if (dpl
< cpl
|| dpl
< rpl
) {
2567 if (!(e2
& DESC_W_MASK
)) {
2569 CC_SRC
= eflags
& ~CC_Z
;
2573 CC_SRC
= eflags
| CC_Z
;
#if defined(CONFIG_USER_ONLY)
/*
 * Load a segment register for user-mode emulation.  In real mode or
 * vm86 mode the descriptor cache is synthesized directly (base =
 * selector << 4, 64 KiB limit, writable data, DPL 3 under vm86);
 * otherwise the full protected-mode load is delegated to
 * helper_load_seg().
 *
 * NOTE(review): the `selector &= 0xffff;` masking before the cache
 * load was reconstructed from the upstream file — confirm.
 */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
2592 /* check if Port I/O is allowed in TSS */
2593 static inline void check_io(CPUX86State
*env
, int addr
, int size
,
2596 int io_offset
, val
, mask
;
2598 /* TSS must be a valid 32 bit one */
2599 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
2600 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
2601 env
->tr
.limit
< 103) {
2604 io_offset
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0x66, retaddr
);
2605 io_offset
+= (addr
>> 3);
2606 /* Note: the check needs two bytes */
2607 if ((io_offset
+ 1) > env
->tr
.limit
) {
2610 val
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ io_offset
, retaddr
);
2612 mask
= (1 << size
) - 1;
2613 /* all bits must be zero to allow the I/O */
2614 if ((val
& mask
) != 0) {
2616 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2620 void helper_check_iob(CPUX86State
*env
, uint32_t t0
)
2622 check_io(env
, t0
, 1, GETPC());
2625 void helper_check_iow(CPUX86State
*env
, uint32_t t0
)
2627 check_io(env
, t0
, 2, GETPC());
2630 void helper_check_iol(CPUX86State
*env
, uint32_t t0
)
2632 check_io(env
, t0
, 4, GETPC());