/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
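
/*
 * The template inclusions above generate the cpu_{ld,st}{ub,uw,l,q}_kernel()
 * accessors (and their _ra variants) used throughout this file; in the
 * softmmu case CPU_MMU_INDEX selects the kernel MMU index, so they access
 * memory with kernel privilege regardless of the current CPL.
 */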
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
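
/*
 * In the helpers above and below, e1 is the low word of a segment descriptor
 * (limit 15..0 and base 15..0) and e2 is the high word (base 23..16, the
 * type/DPL/present flags, limit 19..16, the G/B/L/AVL bits and base 31..24).
 */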
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;

        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
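
/*
 * The "source" value tells switch_tss_ra() how the task switch was initiated:
 * JMP and IRET clear the busy bit of the outgoing TSS, while CALL keeps it
 * set, stores a back link to the old TSS and sets NT in the new EFLAGS.
 */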
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 2;                                                    \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                  \
    {                                                                        \
        sp -= 4;                                                             \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                    \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                  \
    {                                                                       \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                            \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
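
/*
 * Note that the push/pop macros update only the local stack-pointer copy
 * passed as "sp"; callers commit it to env->regs[R_ESP] afterwards (via
 * SET_ESP), so a fault during a push or pop leaves the architectural ESP
 * unchanged and the faulting instruction restartable.
 */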
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
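
/*
 * 64-bit TSS layout note: index = 8 * level + 4 selects RSP0-RSP2 for
 * levels 0-2; because of the reserved quadword at offset 0x1c, IST n is
 * reached with level n + 3, which is why callers pass "ist + 3".
 */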
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
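
/*
 * handle_even_inj() runs only while the guest executes under SVM
 * (HF_SVMI_MASK): it records the event being delivered in the VMCB
 * event_inj field; do_interrupt_all() clears the VALID bit again once
 * the event has been delivered.
 */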
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif

    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
        ret = true;
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;

            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;

            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data_ra(env, ssp + (esp & esp_mask),
                            cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
                                            GETPC()),
                            GETPC());
        }
        esp -= 4;
        cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, ssp + (esp & esp_mask),
                            cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
                                             GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 8;
        cpu_stq_data_ra(env, esp, t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, esp, t1, GETPC());
    }
}
#endif
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
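
/*
 * Stack frames consumed by helper_ret_protected(), in outline:
 *   lret/iret to the same privilege level: (E)IP, CS [, EFLAGS for iret]
 *   return to an outer privilege level:    (E)IP, CS [, EFLAGS], (E)SP, SS
 *   iret back to vm86 mode (VM set in the popped EFLAGS): EIP, CS,
 *     EFLAGS, ESP, SS, ES, DS, FS, GS, all 32-bit slots.
 * 'addend' (the imm16 of "lret $imm16") is added to the stack pointer
 * once CS:(E)IP have been popped, discarding the caller-pushed
 * parameters.
 */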

void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
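
/*
 * With NT set, IRET is a task return: the previous task's TSS selector
 * is the first word of the current TSS (the "previous task link" field
 * read above at env->tr.base + 0), and the switch back is performed by
 * switch_tss_ra() instead of the normal stack unwind.
 */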

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
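
/*
 * SYSENTER/SYSEXIT derive every selector from IA32_SYSENTER_CS:
 *   SYSENTER:           CS = MSR,      SS = MSR + 8
 *   SYSEXIT to 32-bit:  CS = MSR + 16, SS = MSR + 24  (RPL forced to 3)
 *   SYSEXIT to 64-bit:  CS = MSR + 32, SS = MSR + 40  (RPL forced to 3)
 * which is why the two helpers above only mask and offset
 * env->sysenter_cs instead of reading descriptors from the GDT.
 */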

target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
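
/*
 * LSL returns the expanded segment limit and LAR the access-rights
 * bytes (e2 & 0x00f0ff00); both report success through ZF, which is
 * set or cleared here by folding CC_Z into the cached CC_SRC value
 * rather than by recomputing EFLAGS.
 */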

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
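
/*
 * VERR/VERW likewise only affect ZF: it is set when the selector is
 * readable (VERR) or writable (VERW) at the current CPL/RPL, and
 * cleared by any of the descriptor, type or privilege checks above.
 */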

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}
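
/*
 * Worked example of the I/O permission bitmap check: for a one-byte
 * access to port 0x3f9, check_io() reads the bitmap base from the TSS
 * at offset 0x66, adds 0x3f9 >> 3 = 0x7f, loads two bytes from that
 * offset, shifts them right by 0x3f9 & 7 = 1 and tests bit 0
 * (mask = (1 << 1) - 1); any set bit in the masked value raises #GP(0).
 */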