2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
40 #include "exec/cpu_ldst_useronly_template.h"
43 #include "exec/cpu_ldst_useronly_template.h"
46 #include "exec/cpu_ldst_useronly_template.h"
49 #include "exec/cpu_ldst_useronly_template.h"
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
55 #include "exec/cpu_ldst_template.h"
58 #include "exec/cpu_ldst_template.h"
61 #include "exec/cpu_ldst_template.h"
64 #include "exec/cpu_ldst_template.h"
69 /* return non zero if error */
70 static inline int load_segment_ra(CPUX86State
*env
, uint32_t *e1_ptr
,
71 uint32_t *e2_ptr
, int selector
,
83 index
= selector
& ~7;
84 if ((index
+ 7) > dt
->limit
) {
87 ptr
= dt
->base
+ index
;
88 *e1_ptr
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
89 *e2_ptr
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
93 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
94 uint32_t *e2_ptr
, int selector
)
96 return load_segment_ra(env
, e1_ptr
, e2_ptr
, selector
, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
103 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
104 if (e2
& DESC_G_MASK
) {
105 limit
= (limit
<< 12) | 0xfff;
110 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
112 return (e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000);
115 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
118 sc
->base
= get_seg_base(e1
, e2
);
119 sc
->limit
= get_seg_limit(e1
, e2
);
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
128 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
129 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
130 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
133 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
134 uint32_t *esp_ptr
, int dpl
,
137 X86CPU
*cpu
= x86_env_get_cpu(env
);
138 int type
, index
, shift
;
143 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
144 for (i
= 0; i
< env
->tr
.limit
; i
++) {
145 printf("%02x ", env
->tr
.base
[i
]);
154 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
155 cpu_abort(CPU(cpu
), "invalid tss");
157 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
158 if ((type
& 7) != 1) {
159 cpu_abort(CPU(cpu
), "invalid tss type");
162 index
= (dpl
* 4 + 2) << shift
;
163 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
164 raise_exception_err_ra(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc, retaddr
);
167 *esp_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
168 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 2, retaddr
);
170 *esp_ptr
= cpu_ldl_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
171 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 4, retaddr
);
175 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
,
181 if ((selector
& 0xfffc) != 0) {
182 if (load_segment_ra(env
, &e1
, &e2
, selector
, retaddr
) != 0) {
183 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
185 if (!(e2
& DESC_S_MASK
)) {
186 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
189 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
190 if (seg_reg
== R_CS
) {
191 if (!(e2
& DESC_CS_MASK
)) {
192 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
195 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
197 } else if (seg_reg
== R_SS
) {
198 /* SS must be writable data */
199 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
200 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
202 if (dpl
!= cpl
|| dpl
!= rpl
) {
203 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
206 /* not readable code */
207 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
208 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
210 /* if data or non conforming code, checks the rights */
211 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
212 if (dpl
< cpl
|| dpl
< rpl
) {
213 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
217 if (!(e2
& DESC_P_MASK
)) {
218 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, retaddr
);
220 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
221 get_seg_base(e1
, e2
),
222 get_seg_limit(e1
, e2
),
225 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
226 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State
*env
, int tss_selector
,
237 uint32_t e1
, uint32_t e2
, int source
,
238 uint32_t next_eip
, uintptr_t retaddr
)
240 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
241 target_ulong tss_base
;
242 uint32_t new_regs
[8], new_segs
[6];
243 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
244 uint32_t old_eflags
, eflags_mask
;
249 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
253 /* if task gate, we read the TSS segment and we load it */
255 if (!(e2
& DESC_P_MASK
)) {
256 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
258 tss_selector
= e1
>> 16;
259 if (tss_selector
& 4) {
260 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
262 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, retaddr
) != 0) {
263 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
265 if (e2
& DESC_S_MASK
) {
266 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
268 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
269 if ((type
& 7) != 1) {
270 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
274 if (!(e2
& DESC_P_MASK
)) {
275 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
283 tss_limit
= get_seg_limit(e1
, e2
);
284 tss_base
= get_seg_base(e1
, e2
);
285 if ((tss_selector
& 4) != 0 ||
286 tss_limit
< tss_limit_max
) {
287 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
289 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
291 old_tss_limit_max
= 103;
293 old_tss_limit_max
= 43;
296 /* read all the registers from the new TSS */
299 new_cr3
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x1c, retaddr
);
300 new_eip
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x20, retaddr
);
301 new_eflags
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x24, retaddr
);
302 for (i
= 0; i
< 8; i
++) {
303 new_regs
[i
] = cpu_ldl_kernel_ra(env
, tss_base
+ (0x28 + i
* 4),
306 for (i
= 0; i
< 6; i
++) {
307 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x48 + i
* 4),
310 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x60, retaddr
);
311 new_trap
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x64, retaddr
);
315 new_eip
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x0e, retaddr
);
316 new_eflags
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x10, retaddr
);
317 for (i
= 0; i
< 8; i
++) {
318 new_regs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x12 + i
* 2),
319 retaddr
) | 0xffff0000;
321 for (i
= 0; i
< 4; i
++) {
322 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x22 + i
* 4),
325 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x2a, retaddr
);
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses before */
337 /* XXX: it can still fail in some cases, so a bigger hack is
338 necessary to valid the TLB after having done the accesses */
340 v1
= cpu_ldub_kernel_ra(env
, env
->tr
.base
, retaddr
);
341 v2
= cpu_ldub_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, retaddr
);
342 cpu_stb_kernel_ra(env
, env
->tr
.base
, v1
, retaddr
);
343 cpu_stb_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, v2
, retaddr
);
345 /* clear busy bit (it is restartable) */
346 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
350 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
351 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
352 e2
&= ~DESC_TSS_BUSY_MASK
;
353 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
355 old_eflags
= cpu_compute_eflags(env
);
356 if (source
== SWITCH_TSS_IRET
) {
357 old_eflags
&= ~NT_MASK
;
360 /* save the current state in the old TSS */
363 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x20, next_eip
, retaddr
);
364 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x24, old_eflags
, retaddr
);
365 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
], retaddr
);
366 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
], retaddr
);
367 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
], retaddr
);
368 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
], retaddr
);
369 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
], retaddr
);
370 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
], retaddr
);
371 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
], retaddr
);
372 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
], retaddr
);
373 for (i
= 0; i
< 6; i
++) {
374 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x48 + i
* 4),
375 env
->segs
[i
].selector
, retaddr
);
379 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x0e, next_eip
, retaddr
);
380 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x10, old_eflags
, retaddr
);
381 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
], retaddr
);
382 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
], retaddr
);
383 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
], retaddr
);
384 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
], retaddr
);
385 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
], retaddr
);
386 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
], retaddr
);
387 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
], retaddr
);
388 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
], retaddr
);
389 for (i
= 0; i
< 4; i
++) {
390 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x22 + i
* 4),
391 env
->segs
[i
].selector
, retaddr
);
395 /* now if an exception occurs, it will occurs in the next task
398 if (source
== SWITCH_TSS_CALL
) {
399 cpu_stw_kernel_ra(env
, tss_base
, env
->tr
.selector
, retaddr
);
400 new_eflags
|= NT_MASK
;
404 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
408 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
409 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
410 e2
|= DESC_TSS_BUSY_MASK
;
411 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env
->cr
[0] |= CR0_TS_MASK
;
417 env
->hflags
|= HF_TS_MASK
;
418 env
->tr
.selector
= tss_selector
;
419 env
->tr
.base
= tss_base
;
420 env
->tr
.limit
= tss_limit
;
421 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
423 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
424 cpu_x86_update_cr3(env
, new_cr3
);
427 /* load all registers without an exception, then reload them with
428 possible exception */
430 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
431 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
433 eflags_mask
&= 0xffff;
435 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
436 /* XXX: what to do in 16 bit case? */
437 env
->regs
[R_EAX
] = new_regs
[0];
438 env
->regs
[R_ECX
] = new_regs
[1];
439 env
->regs
[R_EDX
] = new_regs
[2];
440 env
->regs
[R_EBX
] = new_regs
[3];
441 env
->regs
[R_ESP
] = new_regs
[4];
442 env
->regs
[R_EBP
] = new_regs
[5];
443 env
->regs
[R_ESI
] = new_regs
[6];
444 env
->regs
[R_EDI
] = new_regs
[7];
445 if (new_eflags
& VM_MASK
) {
446 for (i
= 0; i
< 6; i
++) {
447 load_seg_vm(env
, i
, new_segs
[i
]);
450 /* first just selectors as the rest may trigger exceptions */
451 for (i
= 0; i
< 6; i
++) {
452 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
456 env
->ldt
.selector
= new_ldt
& ~4;
463 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
466 if ((new_ldt
& 0xfffc) != 0) {
468 index
= new_ldt
& ~7;
469 if ((index
+ 7) > dt
->limit
) {
470 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
472 ptr
= dt
->base
+ index
;
473 e1
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
474 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
475 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
476 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
478 if (!(e2
& DESC_P_MASK
)) {
479 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
481 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
484 /* load the segments */
485 if (!(new_eflags
& VM_MASK
)) {
486 int cpl
= new_segs
[R_CS
] & 3;
487 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
, retaddr
);
488 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
, retaddr
);
489 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
, retaddr
);
490 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
, retaddr
);
491 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
, retaddr
);
492 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
, retaddr
);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip
> env
->segs
[R_CS
].limit
) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
504 for (i
= 0; i
< DR7_MAX_BP
; i
++) {
505 if (hw_local_breakpoint_enabled(env
->dr
[7], i
) &&
506 !hw_global_breakpoint_enabled(env
->dr
[7], i
)) {
507 hw_breakpoint_remove(env
, i
);
510 env
->dr
[7] &= ~DR7_LOCAL_BP_MASK
;
515 static void switch_tss(CPUX86State
*env
, int tss_selector
,
516 uint32_t e1
, uint32_t e2
, int source
,
519 switch_tss_ra(env
, tss_selector
, e1
, e2
, source
, next_eip
, 0);
522 static inline unsigned int get_sp_mask(unsigned int e2
)
524 if (e2
& DESC_B_MASK
) {
531 static int exception_has_error_code(int intno
)
547 #define SET_ESP(val, sp_mask) \
549 if ((sp_mask) == 0xffff) { \
550 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
552 } else if ((sp_mask) == 0xffffffffLL) { \
553 env->regs[R_ESP] = (uint32_t)(val); \
555 env->regs[R_ESP] = (val); \
559 #define SET_ESP(val, sp_mask) \
561 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
562 ((val) & (sp_mask)); \
566 /* in 64-bit machines, this can overflow. So this segment addition macro
567 * can be used to trim the value to 32-bit whenever needed */
568 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
570 /* XXX: add a is_user flag to have proper security support */
571 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
574 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
580 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
585 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
589 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
591 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
595 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
596 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
597 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
598 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
600 /* protected mode interrupt */
601 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
602 int error_code
, unsigned int next_eip
,
606 target_ulong ptr
, ssp
;
607 int type
, dpl
, selector
, ss_dpl
, cpl
;
608 int has_error_code
, new_stack
, shift
;
609 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
610 uint32_t old_eip
, sp_mask
;
611 int vm86
= env
->eflags
& VM_MASK
;
614 if (!is_int
&& !is_hw
) {
615 has_error_code
= exception_has_error_code(intno
);
624 if (intno
* 8 + 7 > dt
->limit
) {
625 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
627 ptr
= dt
->base
+ intno
* 8;
628 e1
= cpu_ldl_kernel(env
, ptr
);
629 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
630 /* check gate type */
631 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
633 case 5: /* task gate */
634 /* must do that check here to return the correct error code */
635 if (!(e2
& DESC_P_MASK
)) {
636 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
638 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
639 if (has_error_code
) {
643 /* push the error code */
644 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
646 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
651 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
652 ssp
= env
->segs
[R_SS
].base
+ esp
;
654 cpu_stl_kernel(env
, ssp
, error_code
);
656 cpu_stw_kernel(env
, ssp
, error_code
);
661 case 6: /* 286 interrupt gate */
662 case 7: /* 286 trap gate */
663 case 14: /* 386 interrupt gate */
664 case 15: /* 386 trap gate */
667 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
670 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
671 cpl
= env
->hflags
& HF_CPL_MASK
;
672 /* check privilege if software int */
673 if (is_int
&& dpl
< cpl
) {
674 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
676 /* check valid bit */
677 if (!(e2
& DESC_P_MASK
)) {
678 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
681 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
682 if ((selector
& 0xfffc) == 0) {
683 raise_exception_err(env
, EXCP0D_GPF
, 0);
685 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
686 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
688 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
689 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
691 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
693 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
695 if (!(e2
& DESC_P_MASK
)) {
696 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
698 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
699 /* to inner privilege */
700 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
, 0);
701 if ((ss
& 0xfffc) == 0) {
702 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
704 if ((ss
& 3) != dpl
) {
705 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
707 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
708 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
710 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
712 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
714 if (!(ss_e2
& DESC_S_MASK
) ||
715 (ss_e2
& DESC_CS_MASK
) ||
716 !(ss_e2
& DESC_W_MASK
)) {
717 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
719 if (!(ss_e2
& DESC_P_MASK
)) {
720 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
723 sp_mask
= get_sp_mask(ss_e2
);
724 ssp
= get_seg_base(ss_e1
, ss_e2
);
725 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
726 /* to same privilege */
728 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
731 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
732 ssp
= env
->segs
[R_SS
].base
;
733 esp
= env
->regs
[R_ESP
];
736 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
737 new_stack
= 0; /* avoid warning */
738 sp_mask
= 0; /* avoid warning */
739 ssp
= 0; /* avoid warning */
740 esp
= 0; /* avoid warning */
746 /* XXX: check that enough room is available */
747 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
756 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
757 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
758 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
759 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
761 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
762 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
764 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
765 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
766 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
767 if (has_error_code
) {
768 PUSHL(ssp
, esp
, sp_mask
, error_code
);
773 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
774 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
775 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
776 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
778 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
779 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
781 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
782 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
783 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
784 if (has_error_code
) {
785 PUSHW(ssp
, esp
, sp_mask
, error_code
);
789 /* interrupt gate clear IF mask */
790 if ((type
& 1) == 0) {
791 env
->eflags
&= ~IF_MASK
;
793 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
797 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
802 ss
= (ss
& ~3) | dpl
;
803 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
804 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
806 SET_ESP(esp
, sp_mask
);
808 selector
= (selector
& ~3) | dpl
;
809 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
810 get_seg_base(e1
, e2
),
811 get_seg_limit(e1
, e2
),
818 #define PUSHQ_RA(sp, val, ra) \
821 cpu_stq_kernel_ra(env, sp, (val), ra); \
824 #define POPQ_RA(sp, val, ra) \
826 val = cpu_ldq_kernel_ra(env, sp, ra); \
830 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
831 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
833 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
835 X86CPU
*cpu
= x86_env_get_cpu(env
);
839 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
840 env
->tr
.base
, env
->tr
.limit
);
843 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
844 cpu_abort(CPU(cpu
), "invalid tss");
846 index
= 8 * level
+ 4;
847 if ((index
+ 7) > env
->tr
.limit
) {
848 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
850 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
853 /* 64 bit interrupt */
854 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
855 int error_code
, target_ulong next_eip
, int is_hw
)
859 int type
, dpl
, selector
, cpl
, ist
;
860 int has_error_code
, new_stack
;
861 uint32_t e1
, e2
, e3
, ss
;
862 target_ulong old_eip
, esp
, offset
;
865 if (!is_int
&& !is_hw
) {
866 has_error_code
= exception_has_error_code(intno
);
875 if (intno
* 16 + 15 > dt
->limit
) {
876 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
878 ptr
= dt
->base
+ intno
* 16;
879 e1
= cpu_ldl_kernel(env
, ptr
);
880 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
881 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
882 /* check gate type */
883 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
892 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
893 cpl
= env
->hflags
& HF_CPL_MASK
;
894 /* check privilege if software int */
895 if (is_int
&& dpl
< cpl
) {
896 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
898 /* check valid bit */
899 if (!(e2
& DESC_P_MASK
)) {
900 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
903 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
905 if ((selector
& 0xfffc) == 0) {
906 raise_exception_err(env
, EXCP0D_GPF
, 0);
909 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
910 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
912 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
913 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
915 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
917 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
919 if (!(e2
& DESC_P_MASK
)) {
920 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
922 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
923 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
925 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
926 /* to inner privilege */
928 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
930 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
931 /* to same privilege */
932 if (env
->eflags
& VM_MASK
) {
933 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
936 esp
= env
->regs
[R_ESP
];
939 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
940 new_stack
= 0; /* avoid warning */
941 esp
= 0; /* avoid warning */
943 esp
&= ~0xfLL
; /* align stack */
945 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
946 PUSHQ(esp
, env
->regs
[R_ESP
]);
947 PUSHQ(esp
, cpu_compute_eflags(env
));
948 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
950 if (has_error_code
) {
951 PUSHQ(esp
, error_code
);
954 /* interrupt gate clear IF mask */
955 if ((type
& 1) == 0) {
956 env
->eflags
&= ~IF_MASK
;
958 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
962 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
964 env
->regs
[R_ESP
] = esp
;
966 selector
= (selector
& ~3) | dpl
;
967 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
968 get_seg_base(e1
, e2
),
969 get_seg_limit(e1
, e2
),
976 #if defined(CONFIG_USER_ONLY)
977 void QEMU_NORETURN
helper_syscall(CPUX86State
*env
, int next_eip_addend
)
979 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
981 cs
->exception_index
= EXCP_SYSCALL
;
982 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
986 void QEMU_NORETURN
helper_vsyscall(CPUX86State
*env
)
988 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
989 cs
->exception_index
= EXCP_VSYSCALL
;
993 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
997 if (!(env
->efer
& MSR_EFER_SCE
)) {
998 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
1000 selector
= (env
->star
>> 32) & 0xffff;
1001 if (env
->hflags
& HF_LMA_MASK
) {
1004 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
1005 env
->regs
[11] = cpu_compute_eflags(env
);
1007 code64
= env
->hflags
& HF_CS64_MASK
;
1009 env
->eflags
&= ~env
->fmask
;
1010 cpu_load_eflags(env
, env
->eflags
, 0);
1011 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1013 DESC_G_MASK
| DESC_P_MASK
|
1015 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1017 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1019 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1021 DESC_W_MASK
| DESC_A_MASK
);
1023 env
->eip
= env
->lstar
;
1025 env
->eip
= env
->cstar
;
1028 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
1030 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1031 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1033 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1035 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1036 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1038 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1040 DESC_W_MASK
| DESC_A_MASK
);
1041 env
->eip
= (uint32_t)env
->star
;
1047 #ifdef TARGET_X86_64
1048 void helper_sysret(CPUX86State
*env
, int dflag
)
1052 if (!(env
->efer
& MSR_EFER_SCE
)) {
1053 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
1055 cpl
= env
->hflags
& HF_CPL_MASK
;
1056 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1057 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1059 selector
= (env
->star
>> 48) & 0xffff;
1060 if (env
->hflags
& HF_LMA_MASK
) {
1061 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1062 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1065 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1067 DESC_G_MASK
| DESC_P_MASK
|
1068 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1069 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1071 env
->eip
= env
->regs
[R_ECX
];
1073 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1075 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1076 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1077 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1078 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1080 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1082 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1083 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1084 DESC_W_MASK
| DESC_A_MASK
);
1086 env
->eflags
|= IF_MASK
;
1087 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1089 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1090 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1091 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1092 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1093 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1095 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1096 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1097 DESC_W_MASK
| DESC_A_MASK
);
1102 /* real mode interrupt */
1103 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1104 int error_code
, unsigned int next_eip
)
1107 target_ulong ptr
, ssp
;
1109 uint32_t offset
, esp
;
1110 uint32_t old_cs
, old_eip
;
1112 /* real mode (simpler!) */
1114 if (intno
* 4 + 3 > dt
->limit
) {
1115 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1117 ptr
= dt
->base
+ intno
* 4;
1118 offset
= cpu_lduw_kernel(env
, ptr
);
1119 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1120 esp
= env
->regs
[R_ESP
];
1121 ssp
= env
->segs
[R_SS
].base
;
1127 old_cs
= env
->segs
[R_CS
].selector
;
1128 /* XXX: use SS segment size? */
1129 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1130 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1131 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1133 /* update processor state */
1134 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1136 env
->segs
[R_CS
].selector
= selector
;
1137 env
->segs
[R_CS
].base
= (selector
<< 4);
1138 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1141 #if defined(CONFIG_USER_ONLY)
1142 /* fake user mode interrupt */
1143 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1144 int error_code
, target_ulong next_eip
)
1148 int dpl
, cpl
, shift
;
1152 if (env
->hflags
& HF_LMA_MASK
) {
1157 ptr
= dt
->base
+ (intno
<< shift
);
1158 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1160 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1161 cpl
= env
->hflags
& HF_CPL_MASK
;
1162 /* check privilege if software int */
1163 if (is_int
&& dpl
< cpl
) {
1164 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1167 /* Since we emulate only user space, we cannot do more than
1168 exiting the emulation with the suitable exception and error
1169 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1170 if (is_int
|| intno
== EXCP_SYSCALL
) {
1171 env
->eip
= next_eip
;
1177 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1178 int error_code
, int is_hw
, int rm
)
1180 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1181 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1182 control
.event_inj
));
1184 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1188 type
= SVM_EVTINJ_TYPE_SOFT
;
1190 type
= SVM_EVTINJ_TYPE_EXEPT
;
1192 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1193 if (!rm
&& exception_has_error_code(intno
)) {
1194 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1195 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1196 control
.event_inj_err
),
1200 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1207 * Begin execution of an interruption. is_int is TRUE if coming from
1208 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1209 * instruction. It is only relevant if is_int is TRUE.
1211 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1212 int error_code
, target_ulong next_eip
, int is_hw
)
1214 CPUX86State
*env
= &cpu
->env
;
1216 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1217 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1220 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1221 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1222 count
, intno
, error_code
, is_int
,
1223 env
->hflags
& HF_CPL_MASK
,
1224 env
->segs
[R_CS
].selector
, env
->eip
,
1225 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1226 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1227 if (intno
== 0x0e) {
1228 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1230 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1233 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1240 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1241 for (i
= 0; i
< 16; i
++) {
1242 qemu_log(" %02x", ldub(ptr
+ i
));
1250 if (env
->cr
[0] & CR0_PE_MASK
) {
1251 #if !defined(CONFIG_USER_ONLY)
1252 if (env
->hflags
& HF_SVMI_MASK
) {
1253 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1256 #ifdef TARGET_X86_64
1257 if (env
->hflags
& HF_LMA_MASK
) {
1258 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1262 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1266 #if !defined(CONFIG_USER_ONLY)
1267 if (env
->hflags
& HF_SVMI_MASK
) {
1268 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1271 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1274 #if !defined(CONFIG_USER_ONLY)
1275 if (env
->hflags
& HF_SVMI_MASK
) {
1276 CPUState
*cs
= CPU(cpu
);
1277 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+
1278 offsetof(struct vmcb
,
1279 control
.event_inj
));
1282 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1283 event_inj
& ~SVM_EVTINJ_VALID
);
1288 void x86_cpu_do_interrupt(CPUState
*cs
)
1290 X86CPU
*cpu
= X86_CPU(cs
);
1291 CPUX86State
*env
= &cpu
->env
;
1293 #if defined(CONFIG_USER_ONLY)
1294 /* if user mode only, we simulate a fake exception
1295 which will be handled outside the cpu execution
1297 do_interrupt_user(env
, cs
->exception_index
,
1298 env
->exception_is_int
,
1300 env
->exception_next_eip
);
1301 /* successfully delivered */
1302 env
->old_exception
= -1;
1304 /* simulate a real cpu exception. On i386, it can
1305 trigger new exceptions, but we do not handle
1306 double or triple faults yet. */
1307 do_interrupt_all(cpu
, cs
->exception_index
,
1308 env
->exception_is_int
,
1310 env
->exception_next_eip
, 0);
1311 /* successfully delivered */
1312 env
->old_exception
= -1;
1316 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1318 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1321 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1323 X86CPU
*cpu
= X86_CPU(cs
);
1324 CPUX86State
*env
= &cpu
->env
;
1327 #if !defined(CONFIG_USER_ONLY)
1328 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1329 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1330 apic_poll_irq(cpu
->apic_state
);
1333 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1335 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1336 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1337 !(env
->hflags
& HF_SMM_MASK
)) {
1338 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1339 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1342 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1343 !(env
->hflags2
& HF2_NMI_MASK
)) {
1344 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1345 env
->hflags2
|= HF2_NMI_MASK
;
1346 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1348 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1349 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1350 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1352 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1353 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1354 (env
->hflags2
& HF2_HIF_MASK
)) ||
1355 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1356 (env
->eflags
& IF_MASK
&&
1357 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1359 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1360 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1361 CPU_INTERRUPT_VIRQ
);
1362 intno
= cpu_get_pic_interrupt(env
);
1363 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1364 "Servicing hardware INT=0x%02x\n", intno
);
1365 do_interrupt_x86_hardirq(env
, intno
, 1);
1366 /* ensure that no TB jump will be modified as
1367 the program flow was changed */
1369 #if !defined(CONFIG_USER_ONLY)
1370 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1371 (env
->eflags
& IF_MASK
) &&
1372 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1374 /* FIXME: this should respect TPR */
1375 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1376 intno
= x86_ldl_phys(cs
, env
->vm_vmcb
1377 + offsetof(struct vmcb
, control
.int_vector
));
1378 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1379 "Servicing virtual hardware INT=0x%02x\n", intno
);
1380 do_interrupt_x86_hardirq(env
, intno
, 1);
1381 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1390 void helper_enter_level(CPUX86State
*env
, int level
, int data32
,
1394 uint32_t esp_mask
, esp
, ebp
;
1396 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1397 ssp
= env
->segs
[R_SS
].base
;
1398 ebp
= env
->regs
[R_EBP
];
1399 esp
= env
->regs
[R_ESP
];
1406 cpu_stl_data_ra(env
, ssp
+ (esp
& esp_mask
),
1407 cpu_ldl_data_ra(env
, ssp
+ (ebp
& esp_mask
),
1412 cpu_stl_data_ra(env
, ssp
+ (esp
& esp_mask
), t1
, GETPC());
1419 cpu_stw_data_ra(env
, ssp
+ (esp
& esp_mask
),
1420 cpu_lduw_data_ra(env
, ssp
+ (ebp
& esp_mask
),
1425 cpu_stw_data_ra(env
, ssp
+ (esp
& esp_mask
), t1
, GETPC());
#ifdef TARGET_X86_64
/* 64-bit ENTER with nonzero nesting level (flat stack, no SS base/mask). */
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 8;
        cpu_stq_data_ra(env, esp, t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, esp, t1, GETPC());
    }
}
#endif
1464 void helper_lldt(CPUX86State
*env
, int selector
)
1468 int index
, entry_limit
;
1472 if ((selector
& 0xfffc) == 0) {
1473 /* XXX: NULL selector case: invalid LDT */
1477 if (selector
& 0x4) {
1478 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1481 index
= selector
& ~7;
1482 #ifdef TARGET_X86_64
1483 if (env
->hflags
& HF_LMA_MASK
) {
1490 if ((index
+ entry_limit
) > dt
->limit
) {
1491 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1493 ptr
= dt
->base
+ index
;
1494 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1495 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1496 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1497 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1499 if (!(e2
& DESC_P_MASK
)) {
1500 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1502 #ifdef TARGET_X86_64
1503 if (env
->hflags
& HF_LMA_MASK
) {
1506 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1507 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1508 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1512 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1515 env
->ldt
.selector
= selector
;
1518 void helper_ltr(CPUX86State
*env
, int selector
)
1522 int index
, type
, entry_limit
;
1526 if ((selector
& 0xfffc) == 0) {
1527 /* NULL selector case: invalid TR */
1532 if (selector
& 0x4) {
1533 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1536 index
= selector
& ~7;
1537 #ifdef TARGET_X86_64
1538 if (env
->hflags
& HF_LMA_MASK
) {
1545 if ((index
+ entry_limit
) > dt
->limit
) {
1546 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1548 ptr
= dt
->base
+ index
;
1549 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1550 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1551 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1552 if ((e2
& DESC_S_MASK
) ||
1553 (type
!= 1 && type
!= 9)) {
1554 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1556 if (!(e2
& DESC_P_MASK
)) {
1557 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1559 #ifdef TARGET_X86_64
1560 if (env
->hflags
& HF_LMA_MASK
) {
1563 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1564 e4
= cpu_ldl_kernel_ra(env
, ptr
+ 12, GETPC());
1565 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1566 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1568 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1569 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1573 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1575 e2
|= DESC_TSS_BUSY_MASK
;
1576 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1578 env
->tr
.selector
= selector
;
1581 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1582 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1591 cpl
= env
->hflags
& HF_CPL_MASK
;
1592 if ((selector
& 0xfffc) == 0) {
1593 /* null selector case */
1595 #ifdef TARGET_X86_64
1596 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1599 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1601 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1604 if (selector
& 0x4) {
1609 index
= selector
& ~7;
1610 if ((index
+ 7) > dt
->limit
) {
1611 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1613 ptr
= dt
->base
+ index
;
1614 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1615 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1617 if (!(e2
& DESC_S_MASK
)) {
1618 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1621 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1622 if (seg_reg
== R_SS
) {
1623 /* must be writable segment */
1624 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1625 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1627 if (rpl
!= cpl
|| dpl
!= cpl
) {
1628 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1631 /* must be readable segment */
1632 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1633 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1636 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1637 /* if not conforming code, test rights */
1638 if (dpl
< cpl
|| dpl
< rpl
) {
1639 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1644 if (!(e2
& DESC_P_MASK
)) {
1645 if (seg_reg
== R_SS
) {
1646 raise_exception_err_ra(env
, EXCP0C_STACK
, selector
& 0xfffc, GETPC());
1648 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1652 /* set the access bit if not already set */
1653 if (!(e2
& DESC_A_MASK
)) {
1655 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1658 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1659 get_seg_base(e1
, e2
),
1660 get_seg_limit(e1
, e2
),
1663 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1664 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1669 /* protected mode jump */
1670 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1671 target_ulong next_eip
)
1674 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1676 if ((new_cs
& 0xfffc) == 0) {
1677 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1679 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1680 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1682 cpl
= env
->hflags
& HF_CPL_MASK
;
1683 if (e2
& DESC_S_MASK
) {
1684 if (!(e2
& DESC_CS_MASK
)) {
1685 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1687 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1688 if (e2
& DESC_C_MASK
) {
1689 /* conforming code segment */
1691 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1694 /* non conforming code segment */
1697 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1700 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1703 if (!(e2
& DESC_P_MASK
)) {
1704 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1706 limit
= get_seg_limit(e1
, e2
);
1707 if (new_eip
> limit
&&
1708 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1709 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1711 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1712 get_seg_base(e1
, e2
), limit
, e2
);
1715 /* jump to call or task gate */
1716 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1718 cpl
= env
->hflags
& HF_CPL_MASK
;
1719 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1721 case 1: /* 286 TSS */
1722 case 9: /* 386 TSS */
1723 case 5: /* task gate */
1724 if (dpl
< cpl
|| dpl
< rpl
) {
1725 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1727 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
, GETPC());
1729 case 4: /* 286 call gate */
1730 case 12: /* 386 call gate */
1731 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1732 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1734 if (!(e2
& DESC_P_MASK
)) {
1735 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1738 new_eip
= (e1
& 0xffff);
1740 new_eip
|= (e2
& 0xffff0000);
1742 if (load_segment_ra(env
, &e1
, &e2
, gate_cs
, GETPC()) != 0) {
1743 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1745 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1746 /* must be code segment */
1747 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1748 (DESC_S_MASK
| DESC_CS_MASK
))) {
1749 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1751 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1752 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1753 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1755 if (!(e2
& DESC_P_MASK
)) {
1756 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1758 limit
= get_seg_limit(e1
, e2
);
1759 if (new_eip
> limit
) {
1760 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1762 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1763 get_seg_base(e1
, e2
), limit
, e2
);
1767 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1773 /* real mode call */
1774 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1775 int shift
, int next_eip
)
1778 uint32_t esp
, esp_mask
;
1782 esp
= env
->regs
[R_ESP
];
1783 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1784 ssp
= env
->segs
[R_SS
].base
;
1786 PUSHL_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1787 PUSHL_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1789 PUSHW_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1790 PUSHW_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1793 SET_ESP(esp
, esp_mask
);
1795 env
->segs
[R_CS
].selector
= new_cs
;
1796 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1799 /* protected mode call */
1800 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1801 int shift
, target_ulong next_eip
)
1804 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1805 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1806 uint32_t val
, limit
, old_sp_mask
;
1807 target_ulong ssp
, old_ssp
;
1809 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1810 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1811 if ((new_cs
& 0xfffc) == 0) {
1812 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1814 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1815 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1817 cpl
= env
->hflags
& HF_CPL_MASK
;
1818 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1819 if (e2
& DESC_S_MASK
) {
1820 if (!(e2
& DESC_CS_MASK
)) {
1821 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1823 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1824 if (e2
& DESC_C_MASK
) {
1825 /* conforming code segment */
1827 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1830 /* non conforming code segment */
1833 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1836 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1839 if (!(e2
& DESC_P_MASK
)) {
1840 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1843 #ifdef TARGET_X86_64
1844 /* XXX: check 16/32 bit cases in long mode */
1849 rsp
= env
->regs
[R_ESP
];
1850 PUSHQ_RA(rsp
, env
->segs
[R_CS
].selector
, GETPC());
1851 PUSHQ_RA(rsp
, next_eip
, GETPC());
1852 /* from this point, not restartable */
1853 env
->regs
[R_ESP
] = rsp
;
1854 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1855 get_seg_base(e1
, e2
),
1856 get_seg_limit(e1
, e2
), e2
);
1861 sp
= env
->regs
[R_ESP
];
1862 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1863 ssp
= env
->segs
[R_SS
].base
;
1865 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1866 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1868 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1869 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1872 limit
= get_seg_limit(e1
, e2
);
1873 if (new_eip
> limit
) {
1874 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1876 /* from this point, not restartable */
1877 SET_ESP(sp
, sp_mask
);
1878 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1879 get_seg_base(e1
, e2
), limit
, e2
);
1883 /* check gate type */
1884 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1885 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1888 case 1: /* available 286 TSS */
1889 case 9: /* available 386 TSS */
1890 case 5: /* task gate */
1891 if (dpl
< cpl
|| dpl
< rpl
) {
1892 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1894 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
, GETPC());
1896 case 4: /* 286 call gate */
1897 case 12: /* 386 call gate */
1900 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1905 if (dpl
< cpl
|| dpl
< rpl
) {
1906 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1908 /* check valid bit */
1909 if (!(e2
& DESC_P_MASK
)) {
1910 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1912 selector
= e1
>> 16;
1913 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1914 param_count
= e2
& 0x1f;
1915 if ((selector
& 0xfffc) == 0) {
1916 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1919 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
1920 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1922 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1923 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1925 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1927 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1929 if (!(e2
& DESC_P_MASK
)) {
1930 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1933 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1934 /* to inner privilege */
1935 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
, GETPC());
1936 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1937 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1939 if ((ss
& 0xfffc) == 0) {
1940 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1942 if ((ss
& 3) != dpl
) {
1943 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1945 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, ss
, GETPC()) != 0) {
1946 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1948 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1949 if (ss_dpl
!= dpl
) {
1950 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1952 if (!(ss_e2
& DESC_S_MASK
) ||
1953 (ss_e2
& DESC_CS_MASK
) ||
1954 !(ss_e2
& DESC_W_MASK
)) {
1955 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1957 if (!(ss_e2
& DESC_P_MASK
)) {
1958 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1961 /* push_size = ((param_count * 2) + 8) << shift; */
1963 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1964 old_ssp
= env
->segs
[R_SS
].base
;
1966 sp_mask
= get_sp_mask(ss_e2
);
1967 ssp
= get_seg_base(ss_e1
, ss_e2
);
1969 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1970 PUSHL_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1971 for (i
= param_count
- 1; i
>= 0; i
--) {
1972 val
= cpu_ldl_kernel_ra(env
, old_ssp
+
1973 ((env
->regs
[R_ESP
] + i
* 4) &
1974 old_sp_mask
), GETPC());
1975 PUSHL_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1978 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1979 PUSHW_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1980 for (i
= param_count
- 1; i
>= 0; i
--) {
1981 val
= cpu_lduw_kernel_ra(env
, old_ssp
+
1982 ((env
->regs
[R_ESP
] + i
* 2) &
1983 old_sp_mask
), GETPC());
1984 PUSHW_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1989 /* to same privilege */
1990 sp
= env
->regs
[R_ESP
];
1991 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1992 ssp
= env
->segs
[R_SS
].base
;
1993 /* push_size = (4 << shift); */
1998 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1999 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
2001 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
2002 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
2005 /* from this point, not restartable */
2008 ss
= (ss
& ~3) | dpl
;
2009 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2011 get_seg_limit(ss_e1
, ss_e2
),
2015 selector
= (selector
& ~3) | dpl
;
2016 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2017 get_seg_base(e1
, e2
),
2018 get_seg_limit(e1
, e2
),
2020 SET_ESP(sp
, sp_mask
);
2025 /* real and vm86 mode iret */
2026 void helper_iret_real(CPUX86State
*env
, int shift
)
2028 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2032 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
2033 sp
= env
->regs
[R_ESP
];
2034 ssp
= env
->segs
[R_SS
].base
;
2037 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
2038 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
2040 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
2043 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
2044 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
2045 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
2047 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
2048 env
->segs
[R_CS
].selector
= new_cs
;
2049 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2051 if (env
->eflags
& VM_MASK
) {
2052 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
2055 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
2059 eflags_mask
&= 0xffff;
2061 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2062 env
->hflags2
&= ~HF2_NMI_MASK
;
2065 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
2070 /* XXX: on x86_64, we do not want to nullify FS and GS because
2071 they may still contain a valid base. I would be interested to
2072 know how a real x86_64 CPU behaves */
2073 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2074 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2078 e2
= env
->segs
[seg_reg
].flags
;
2079 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2080 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2081 /* data or non conforming code segment */
2083 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2088 /* protected mode iret */
2089 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2090 int is_iret
, int addend
,
2093 uint32_t new_cs
, new_eflags
, new_ss
;
2094 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2095 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2096 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2097 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2099 #ifdef TARGET_X86_64
2105 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2107 sp
= env
->regs
[R_ESP
];
2108 ssp
= env
->segs
[R_SS
].base
;
2109 new_eflags
= 0; /* avoid warning */
2110 #ifdef TARGET_X86_64
2112 POPQ_RA(sp
, new_eip
, retaddr
);
2113 POPQ_RA(sp
, new_cs
, retaddr
);
2116 POPQ_RA(sp
, new_eflags
, retaddr
);
2123 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2124 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2127 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2128 if (new_eflags
& VM_MASK
) {
2129 goto return_to_vm86
;
2134 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2135 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2137 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2141 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2142 new_cs
, new_eip
, shift
, addend
);
2143 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2144 if ((new_cs
& 0xfffc) == 0) {
2145 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2147 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, retaddr
) != 0) {
2148 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2150 if (!(e2
& DESC_S_MASK
) ||
2151 !(e2
& DESC_CS_MASK
)) {
2152 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2154 cpl
= env
->hflags
& HF_CPL_MASK
;
2157 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2159 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2160 if (e2
& DESC_C_MASK
) {
2162 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2166 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2169 if (!(e2
& DESC_P_MASK
)) {
2170 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, retaddr
);
2174 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2175 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2176 /* return to same privilege level */
2177 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2178 get_seg_base(e1
, e2
),
2179 get_seg_limit(e1
, e2
),
2182 /* return to different privilege level */
2183 #ifdef TARGET_X86_64
2185 POPQ_RA(sp
, new_esp
, retaddr
);
2186 POPQ_RA(sp
, new_ss
, retaddr
);
2193 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2194 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2198 POPW_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2199 POPW_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2202 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2204 if ((new_ss
& 0xfffc) == 0) {
2205 #ifdef TARGET_X86_64
2206 /* NULL ss is allowed in long mode if cpl != 3 */
2207 /* XXX: test CS64? */
2208 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2209 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2211 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2212 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2213 DESC_W_MASK
| DESC_A_MASK
);
2214 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2218 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2221 if ((new_ss
& 3) != rpl
) {
2222 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2224 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, new_ss
, retaddr
) != 0) {
2225 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2227 if (!(ss_e2
& DESC_S_MASK
) ||
2228 (ss_e2
& DESC_CS_MASK
) ||
2229 !(ss_e2
& DESC_W_MASK
)) {
2230 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2232 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2234 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2236 if (!(ss_e2
& DESC_P_MASK
)) {
2237 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc, retaddr
);
2239 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2240 get_seg_base(ss_e1
, ss_e2
),
2241 get_seg_limit(ss_e1
, ss_e2
),
2245 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2246 get_seg_base(e1
, e2
),
2247 get_seg_limit(e1
, e2
),
2250 #ifdef TARGET_X86_64
2251 if (env
->hflags
& HF_CS64_MASK
) {
2256 sp_mask
= get_sp_mask(ss_e2
);
2259 /* validate data segments */
2260 validate_seg(env
, R_ES
, rpl
);
2261 validate_seg(env
, R_DS
, rpl
);
2262 validate_seg(env
, R_FS
, rpl
);
2263 validate_seg(env
, R_GS
, rpl
);
2267 SET_ESP(sp
, sp_mask
);
2270 /* NOTE: 'cpl' is the _old_ CPL */
2271 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2273 eflags_mask
|= IOPL_MASK
;
2275 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2277 eflags_mask
|= IF_MASK
;
2280 eflags_mask
&= 0xffff;
2282 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2287 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2288 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2289 POPL_RA(ssp
, sp
, sp_mask
, new_es
, retaddr
);
2290 POPL_RA(ssp
, sp
, sp_mask
, new_ds
, retaddr
);
2291 POPL_RA(ssp
, sp
, sp_mask
, new_fs
, retaddr
);
2292 POPL_RA(ssp
, sp
, sp_mask
, new_gs
, retaddr
);
2294 /* modify processor state */
2295 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2296 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2298 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2299 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2300 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2301 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2302 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2303 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2305 env
->eip
= new_eip
& 0xffff;
2306 env
->regs
[R_ESP
] = new_esp
;
2309 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2311 int tss_selector
, type
;
2314 /* specific case for TSS */
2315 if (env
->eflags
& NT_MASK
) {
2316 #ifdef TARGET_X86_64
2317 if (env
->hflags
& HF_LMA_MASK
) {
2318 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2321 tss_selector
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0, GETPC());
2322 if (tss_selector
& 4) {
2323 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2325 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, GETPC()) != 0) {
2326 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2328 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2329 /* NOTE: we check both segment and busy TSS */
2331 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2333 switch_tss_ra(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
, GETPC());
2335 helper_ret_protected(env
, shift
, 1, 0, GETPC());
2337 env
->hflags2
&= ~HF2_NMI_MASK
;
2340 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2342 helper_ret_protected(env
, shift
, 0, addend
, GETPC());
2345 void helper_sysenter(CPUX86State
*env
)
2347 if (env
->sysenter_cs
== 0) {
2348 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2350 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2352 #ifdef TARGET_X86_64
2353 if (env
->hflags
& HF_LMA_MASK
) {
2354 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2356 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2358 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2363 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2365 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2367 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2369 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2371 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2373 DESC_W_MASK
| DESC_A_MASK
);
2374 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2375 env
->eip
= env
->sysenter_eip
;
2378 void helper_sysexit(CPUX86State
*env
, int dflag
)
2382 cpl
= env
->hflags
& HF_CPL_MASK
;
2383 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2384 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2386 #ifdef TARGET_X86_64
2388 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2390 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2391 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2392 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2394 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2396 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2397 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2398 DESC_W_MASK
| DESC_A_MASK
);
2402 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2404 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2405 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2406 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2407 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2409 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2410 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2411 DESC_W_MASK
| DESC_A_MASK
);
2413 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2414 env
->eip
= env
->regs
[R_EDX
];
2417 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2420 uint32_t e1
, e2
, eflags
, selector
;
2421 int rpl
, dpl
, cpl
, type
;
2423 selector
= selector1
& 0xffff;
2424 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2425 if ((selector
& 0xfffc) == 0) {
2428 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2432 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2433 cpl
= env
->hflags
& HF_CPL_MASK
;
2434 if (e2
& DESC_S_MASK
) {
2435 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2438 if (dpl
< cpl
|| dpl
< rpl
) {
2443 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2454 if (dpl
< cpl
|| dpl
< rpl
) {
2456 CC_SRC
= eflags
& ~CC_Z
;
2460 limit
= get_seg_limit(e1
, e2
);
2461 CC_SRC
= eflags
| CC_Z
;
2465 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2467 uint32_t e1
, e2
, eflags
, selector
;
2468 int rpl
, dpl
, cpl
, type
;
2470 selector
= selector1
& 0xffff;
2471 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2472 if ((selector
& 0xfffc) == 0) {
2475 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2479 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2480 cpl
= env
->hflags
& HF_CPL_MASK
;
2481 if (e2
& DESC_S_MASK
) {
2482 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2485 if (dpl
< cpl
|| dpl
< rpl
) {
2490 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2504 if (dpl
< cpl
|| dpl
< rpl
) {
2506 CC_SRC
= eflags
& ~CC_Z
;
2510 CC_SRC
= eflags
| CC_Z
;
2511 return e2
& 0x00f0ff00;
2514 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2516 uint32_t e1
, e2
, eflags
, selector
;
2519 selector
= selector1
& 0xffff;
2520 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2521 if ((selector
& 0xfffc) == 0) {
2524 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2527 if (!(e2
& DESC_S_MASK
)) {
2531 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2532 cpl
= env
->hflags
& HF_CPL_MASK
;
2533 if (e2
& DESC_CS_MASK
) {
2534 if (!(e2
& DESC_R_MASK
)) {
2537 if (!(e2
& DESC_C_MASK
)) {
2538 if (dpl
< cpl
|| dpl
< rpl
) {
2543 if (dpl
< cpl
|| dpl
< rpl
) {
2545 CC_SRC
= eflags
& ~CC_Z
;
2549 CC_SRC
= eflags
| CC_Z
;
2552 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2554 uint32_t e1
, e2
, eflags
, selector
;
2557 selector
= selector1
& 0xffff;
2558 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2559 if ((selector
& 0xfffc) == 0) {
2562 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2565 if (!(e2
& DESC_S_MASK
)) {
2569 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2570 cpl
= env
->hflags
& HF_CPL_MASK
;
2571 if (e2
& DESC_CS_MASK
) {
2574 if (dpl
< cpl
|| dpl
< rpl
) {
2577 if (!(e2
& DESC_W_MASK
)) {
2579 CC_SRC
= eflags
& ~CC_Z
;
2583 CC_SRC
= eflags
| CC_Z
;
#if defined(CONFIG_USER_ONLY)
/* user-mode segment load: in real or vm86 mode synthesize a flat
   real-mode style descriptor (base = selector << 4, 64K limit);
   otherwise go through the full protected-mode load. */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        /* vm86 segments run at DPL 3, real mode at DPL 0 */
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
2602 /* check if Port I/O is allowed in TSS */
2603 static inline void check_io(CPUX86State
*env
, int addr
, int size
,
2606 int io_offset
, val
, mask
;
2608 /* TSS must be a valid 32 bit one */
2609 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
2610 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
2611 env
->tr
.limit
< 103) {
2614 io_offset
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0x66, retaddr
);
2615 io_offset
+= (addr
>> 3);
2616 /* Note: the check needs two bytes */
2617 if ((io_offset
+ 1) > env
->tr
.limit
) {
2620 val
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ io_offset
, retaddr
);
2622 mask
= (1 << size
) - 1;
2623 /* all bits must be zero to allow the I/O */
2624 if ((val
& mask
) != 0) {
2626 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2630 void helper_check_iob(CPUX86State
*env
, uint32_t t0
)
2632 check_io(env
, t0
, 1, GETPC());
2635 void helper_check_iow(CPUX86State
*env
, uint32_t t0
)
2637 check_io(env
, t0
, 2, GETPC());
2640 void helper_check_iol(CPUX86State
*env
, uint32_t t0
)
2642 check_io(env
, t0
, 4, GETPC());