2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "exec/helper-proto.h"
25 #include "exec/cpu_ldst.h"
31 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
32 # define LOG_PCALL_STATE(cpu) \
33 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 # define LOG_PCALL(...) do { } while (0)
36 # define LOG_PCALL_STATE(cpu) do { } while (0)
39 #ifdef CONFIG_USER_ONLY
40 #define MEMSUFFIX _kernel
42 #include "exec/cpu_ldst_useronly_template.h"
45 #include "exec/cpu_ldst_useronly_template.h"
48 #include "exec/cpu_ldst_useronly_template.h"
51 #include "exec/cpu_ldst_useronly_template.h"
54 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
55 #define MEMSUFFIX _kernel
57 #include "exec/cpu_ldst_template.h"
60 #include "exec/cpu_ldst_template.h"
63 #include "exec/cpu_ldst_template.h"
66 #include "exec/cpu_ldst_template.h"
71 /* return non zero if error */
72 static inline int load_segment_ra(CPUX86State
*env
, uint32_t *e1_ptr
,
73 uint32_t *e2_ptr
, int selector
,
85 index
= selector
& ~7;
86 if ((index
+ 7) > dt
->limit
) {
89 ptr
= dt
->base
+ index
;
90 *e1_ptr
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
91 *e2_ptr
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
95 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
96 uint32_t *e2_ptr
, int selector
)
98 return load_segment_ra(env
, e1_ptr
, e2_ptr
, selector
, 0);
101 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
105 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
106 if (e2
& DESC_G_MASK
) {
107 limit
= (limit
<< 12) | 0xfff;
112 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
114 return (e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000);
117 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
120 sc
->base
= get_seg_base(e1
, e2
);
121 sc
->limit
= get_seg_limit(e1
, e2
);
125 /* init the segment cache in vm86 mode. */
126 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
130 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
131 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
132 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
135 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
136 uint32_t *esp_ptr
, int dpl
,
139 X86CPU
*cpu
= x86_env_get_cpu(env
);
140 int type
, index
, shift
;
145 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
146 for (i
= 0; i
< env
->tr
.limit
; i
++) {
147 printf("%02x ", env
->tr
.base
[i
]);
156 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
157 cpu_abort(CPU(cpu
), "invalid tss");
159 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
160 if ((type
& 7) != 1) {
161 cpu_abort(CPU(cpu
), "invalid tss type");
164 index
= (dpl
* 4 + 2) << shift
;
165 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
166 raise_exception_err_ra(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc, retaddr
);
169 *esp_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
170 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 2, retaddr
);
172 *esp_ptr
= cpu_ldl_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
173 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 4, retaddr
);
177 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
,
183 if ((selector
& 0xfffc) != 0) {
184 if (load_segment_ra(env
, &e1
, &e2
, selector
, retaddr
) != 0) {
185 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
187 if (!(e2
& DESC_S_MASK
)) {
188 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
191 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
192 if (seg_reg
== R_CS
) {
193 if (!(e2
& DESC_CS_MASK
)) {
194 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
197 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
199 } else if (seg_reg
== R_SS
) {
200 /* SS must be writable data */
201 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
202 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
204 if (dpl
!= cpl
|| dpl
!= rpl
) {
205 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
208 /* not readable code */
209 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
210 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
212 /* if data or non conforming code, checks the rights */
213 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
214 if (dpl
< cpl
|| dpl
< rpl
) {
215 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
219 if (!(e2
& DESC_P_MASK
)) {
220 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, retaddr
);
222 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
223 get_seg_base(e1
, e2
),
224 get_seg_limit(e1
, e2
),
227 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
228 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
233 #define SWITCH_TSS_JMP 0
234 #define SWITCH_TSS_IRET 1
235 #define SWITCH_TSS_CALL 2
237 /* XXX: restore CPU state in registers (PowerPC case) */
238 static void switch_tss_ra(CPUX86State
*env
, int tss_selector
,
239 uint32_t e1
, uint32_t e2
, int source
,
240 uint32_t next_eip
, uintptr_t retaddr
)
242 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
243 target_ulong tss_base
;
244 uint32_t new_regs
[8], new_segs
[6];
245 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
246 uint32_t old_eflags
, eflags_mask
;
251 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
252 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
255 /* if task gate, we read the TSS segment and we load it */
257 if (!(e2
& DESC_P_MASK
)) {
258 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
260 tss_selector
= e1
>> 16;
261 if (tss_selector
& 4) {
262 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
264 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, retaddr
) != 0) {
265 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
267 if (e2
& DESC_S_MASK
) {
268 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
270 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
271 if ((type
& 7) != 1) {
272 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
276 if (!(e2
& DESC_P_MASK
)) {
277 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
285 tss_limit
= get_seg_limit(e1
, e2
);
286 tss_base
= get_seg_base(e1
, e2
);
287 if ((tss_selector
& 4) != 0 ||
288 tss_limit
< tss_limit_max
) {
289 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
291 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
293 old_tss_limit_max
= 103;
295 old_tss_limit_max
= 43;
298 /* read all the registers from the new TSS */
301 new_cr3
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x1c, retaddr
);
302 new_eip
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x20, retaddr
);
303 new_eflags
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x24, retaddr
);
304 for (i
= 0; i
< 8; i
++) {
305 new_regs
[i
] = cpu_ldl_kernel_ra(env
, tss_base
+ (0x28 + i
* 4),
308 for (i
= 0; i
< 6; i
++) {
309 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x48 + i
* 4),
312 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x60, retaddr
);
313 new_trap
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x64, retaddr
);
317 new_eip
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x0e, retaddr
);
318 new_eflags
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x10, retaddr
);
319 for (i
= 0; i
< 8; i
++) {
320 new_regs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x12 + i
* 2),
321 retaddr
) | 0xffff0000;
323 for (i
= 0; i
< 4; i
++) {
324 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x22 + i
* 4),
327 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x2a, retaddr
);
332 /* XXX: avoid a compiler warning, see
333 http://support.amd.com/us/Processor_TechDocs/24593.pdf
334 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
337 /* NOTE: we must avoid memory exceptions during the task switch,
338 so we make dummy accesses before */
339 /* XXX: it can still fail in some cases, so a bigger hack is
340 necessary to valid the TLB after having done the accesses */
342 v1
= cpu_ldub_kernel_ra(env
, env
->tr
.base
, retaddr
);
343 v2
= cpu_ldub_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, retaddr
);
344 cpu_stb_kernel_ra(env
, env
->tr
.base
, v1
, retaddr
);
345 cpu_stb_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, v2
, retaddr
);
347 /* clear busy bit (it is restartable) */
348 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
352 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
353 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
354 e2
&= ~DESC_TSS_BUSY_MASK
;
355 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
357 old_eflags
= cpu_compute_eflags(env
);
358 if (source
== SWITCH_TSS_IRET
) {
359 old_eflags
&= ~NT_MASK
;
362 /* save the current state in the old TSS */
365 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x20, next_eip
, retaddr
);
366 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x24, old_eflags
, retaddr
);
367 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
], retaddr
);
368 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
], retaddr
);
369 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
], retaddr
);
370 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
], retaddr
);
371 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
], retaddr
);
372 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
], retaddr
);
373 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
], retaddr
);
374 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
], retaddr
);
375 for (i
= 0; i
< 6; i
++) {
376 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x48 + i
* 4),
377 env
->segs
[i
].selector
, retaddr
);
381 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x0e, next_eip
, retaddr
);
382 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x10, old_eflags
, retaddr
);
383 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
], retaddr
);
384 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
], retaddr
);
385 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
], retaddr
);
386 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
], retaddr
);
387 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
], retaddr
);
388 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
], retaddr
);
389 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
], retaddr
);
390 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
], retaddr
);
391 for (i
= 0; i
< 4; i
++) {
392 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x22 + i
* 4),
393 env
->segs
[i
].selector
, retaddr
);
397 /* now if an exception occurs, it will occurs in the next task
400 if (source
== SWITCH_TSS_CALL
) {
401 cpu_stw_kernel_ra(env
, tss_base
, env
->tr
.selector
, retaddr
);
402 new_eflags
|= NT_MASK
;
406 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
410 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
411 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
412 e2
|= DESC_TSS_BUSY_MASK
;
413 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
416 /* set the new CPU state */
417 /* from this point, any exception which occurs can give problems */
418 env
->cr
[0] |= CR0_TS_MASK
;
419 env
->hflags
|= HF_TS_MASK
;
420 env
->tr
.selector
= tss_selector
;
421 env
->tr
.base
= tss_base
;
422 env
->tr
.limit
= tss_limit
;
423 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
425 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
426 cpu_x86_update_cr3(env
, new_cr3
);
429 /* load all registers without an exception, then reload them with
430 possible exception */
432 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
433 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
435 eflags_mask
&= 0xffff;
437 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
438 /* XXX: what to do in 16 bit case? */
439 env
->regs
[R_EAX
] = new_regs
[0];
440 env
->regs
[R_ECX
] = new_regs
[1];
441 env
->regs
[R_EDX
] = new_regs
[2];
442 env
->regs
[R_EBX
] = new_regs
[3];
443 env
->regs
[R_ESP
] = new_regs
[4];
444 env
->regs
[R_EBP
] = new_regs
[5];
445 env
->regs
[R_ESI
] = new_regs
[6];
446 env
->regs
[R_EDI
] = new_regs
[7];
447 if (new_eflags
& VM_MASK
) {
448 for (i
= 0; i
< 6; i
++) {
449 load_seg_vm(env
, i
, new_segs
[i
]);
452 /* first just selectors as the rest may trigger exceptions */
453 for (i
= 0; i
< 6; i
++) {
454 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
458 env
->ldt
.selector
= new_ldt
& ~4;
465 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
468 if ((new_ldt
& 0xfffc) != 0) {
470 index
= new_ldt
& ~7;
471 if ((index
+ 7) > dt
->limit
) {
472 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
474 ptr
= dt
->base
+ index
;
475 e1
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
476 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
477 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
478 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
480 if (!(e2
& DESC_P_MASK
)) {
481 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
483 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
486 /* load the segments */
487 if (!(new_eflags
& VM_MASK
)) {
488 int cpl
= new_segs
[R_CS
] & 3;
489 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
, retaddr
);
490 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
, retaddr
);
491 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
, retaddr
);
492 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
, retaddr
);
493 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
, retaddr
);
494 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
, retaddr
);
497 /* check that env->eip is in the CS segment limits */
498 if (new_eip
> env
->segs
[R_CS
].limit
) {
499 /* XXX: different exception if CALL? */
500 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
503 #ifndef CONFIG_USER_ONLY
504 /* reset local breakpoints */
505 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
506 cpu_x86_update_dr7(env
, env
->dr
[7] & ~DR7_LOCAL_BP_MASK
);
511 static void switch_tss(CPUX86State
*env
, int tss_selector
,
512 uint32_t e1
, uint32_t e2
, int source
,
515 switch_tss_ra(env
, tss_selector
, e1
, e2
, source
, next_eip
, 0);
518 static inline unsigned int get_sp_mask(unsigned int e2
)
520 if (e2
& DESC_B_MASK
) {
527 static int exception_has_error_code(int intno
)
543 #define SET_ESP(val, sp_mask) \
545 if ((sp_mask) == 0xffff) { \
546 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
548 } else if ((sp_mask) == 0xffffffffLL) { \
549 env->regs[R_ESP] = (uint32_t)(val); \
551 env->regs[R_ESP] = (val); \
555 #define SET_ESP(val, sp_mask) \
557 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
558 ((val) & (sp_mask)); \
562 /* in 64-bit machines, this can overflow. So this segment addition macro
563 * can be used to trim the value to 32-bit whenever needed */
564 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
566 /* XXX: add a is_user flag to have proper security support */
567 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
570 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
573 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
576 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
579 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
581 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
585 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
587 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
591 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
592 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
593 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
594 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
596 /* protected mode interrupt */
597 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
598 int error_code
, unsigned int next_eip
,
602 target_ulong ptr
, ssp
;
603 int type
, dpl
, selector
, ss_dpl
, cpl
;
604 int has_error_code
, new_stack
, shift
;
605 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
606 uint32_t old_eip
, sp_mask
;
607 int vm86
= env
->eflags
& VM_MASK
;
610 if (!is_int
&& !is_hw
) {
611 has_error_code
= exception_has_error_code(intno
);
620 if (intno
* 8 + 7 > dt
->limit
) {
621 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
623 ptr
= dt
->base
+ intno
* 8;
624 e1
= cpu_ldl_kernel(env
, ptr
);
625 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
626 /* check gate type */
627 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
629 case 5: /* task gate */
630 /* must do that check here to return the correct error code */
631 if (!(e2
& DESC_P_MASK
)) {
632 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
634 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
635 if (has_error_code
) {
639 /* push the error code */
640 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
642 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
647 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
648 ssp
= env
->segs
[R_SS
].base
+ esp
;
650 cpu_stl_kernel(env
, ssp
, error_code
);
652 cpu_stw_kernel(env
, ssp
, error_code
);
657 case 6: /* 286 interrupt gate */
658 case 7: /* 286 trap gate */
659 case 14: /* 386 interrupt gate */
660 case 15: /* 386 trap gate */
663 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
666 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
667 cpl
= env
->hflags
& HF_CPL_MASK
;
668 /* check privilege if software int */
669 if (is_int
&& dpl
< cpl
) {
670 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
672 /* check valid bit */
673 if (!(e2
& DESC_P_MASK
)) {
674 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
677 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
678 if ((selector
& 0xfffc) == 0) {
679 raise_exception_err(env
, EXCP0D_GPF
, 0);
681 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
682 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
684 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
685 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
687 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
689 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
691 if (!(e2
& DESC_P_MASK
)) {
692 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
694 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
695 /* to inner privilege */
696 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
, 0);
697 if ((ss
& 0xfffc) == 0) {
698 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
700 if ((ss
& 3) != dpl
) {
701 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
703 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
704 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
706 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
708 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
710 if (!(ss_e2
& DESC_S_MASK
) ||
711 (ss_e2
& DESC_CS_MASK
) ||
712 !(ss_e2
& DESC_W_MASK
)) {
713 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
715 if (!(ss_e2
& DESC_P_MASK
)) {
716 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
719 sp_mask
= get_sp_mask(ss_e2
);
720 ssp
= get_seg_base(ss_e1
, ss_e2
);
721 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
722 /* to same privilege */
724 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
727 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
728 ssp
= env
->segs
[R_SS
].base
;
729 esp
= env
->regs
[R_ESP
];
732 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
733 new_stack
= 0; /* avoid warning */
734 sp_mask
= 0; /* avoid warning */
735 ssp
= 0; /* avoid warning */
736 esp
= 0; /* avoid warning */
742 /* XXX: check that enough room is available */
743 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
752 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
753 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
754 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
755 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
757 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
758 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
760 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
761 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
762 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
763 if (has_error_code
) {
764 PUSHL(ssp
, esp
, sp_mask
, error_code
);
769 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
770 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
771 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
772 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
774 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
775 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
777 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
778 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
779 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
780 if (has_error_code
) {
781 PUSHW(ssp
, esp
, sp_mask
, error_code
);
785 /* interrupt gate clear IF mask */
786 if ((type
& 1) == 0) {
787 env
->eflags
&= ~IF_MASK
;
789 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
793 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
798 ss
= (ss
& ~3) | dpl
;
799 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
800 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
802 SET_ESP(esp
, sp_mask
);
804 selector
= (selector
& ~3) | dpl
;
805 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
806 get_seg_base(e1
, e2
),
807 get_seg_limit(e1
, e2
),
814 #define PUSHQ_RA(sp, val, ra) \
817 cpu_stq_kernel_ra(env, sp, (val), ra); \
820 #define POPQ_RA(sp, val, ra) \
822 val = cpu_ldq_kernel_ra(env, sp, ra); \
826 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
827 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
829 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
831 X86CPU
*cpu
= x86_env_get_cpu(env
);
835 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
836 env
->tr
.base
, env
->tr
.limit
);
839 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
840 cpu_abort(CPU(cpu
), "invalid tss");
842 index
= 8 * level
+ 4;
843 if ((index
+ 7) > env
->tr
.limit
) {
844 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
846 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
849 /* 64 bit interrupt */
850 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
851 int error_code
, target_ulong next_eip
, int is_hw
)
855 int type
, dpl
, selector
, cpl
, ist
;
856 int has_error_code
, new_stack
;
857 uint32_t e1
, e2
, e3
, ss
;
858 target_ulong old_eip
, esp
, offset
;
861 if (!is_int
&& !is_hw
) {
862 has_error_code
= exception_has_error_code(intno
);
871 if (intno
* 16 + 15 > dt
->limit
) {
872 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
874 ptr
= dt
->base
+ intno
* 16;
875 e1
= cpu_ldl_kernel(env
, ptr
);
876 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
877 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
878 /* check gate type */
879 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
881 case 14: /* 386 interrupt gate */
882 case 15: /* 386 trap gate */
885 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
888 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
889 cpl
= env
->hflags
& HF_CPL_MASK
;
890 /* check privilege if software int */
891 if (is_int
&& dpl
< cpl
) {
892 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
894 /* check valid bit */
895 if (!(e2
& DESC_P_MASK
)) {
896 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
899 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
901 if ((selector
& 0xfffc) == 0) {
902 raise_exception_err(env
, EXCP0D_GPF
, 0);
905 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
906 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
908 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
909 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
911 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
913 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
915 if (!(e2
& DESC_P_MASK
)) {
916 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
918 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
919 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
921 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
922 /* to inner privilege */
924 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
926 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
927 /* to same privilege */
928 if (env
->eflags
& VM_MASK
) {
929 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
932 esp
= env
->regs
[R_ESP
];
935 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
936 new_stack
= 0; /* avoid warning */
937 esp
= 0; /* avoid warning */
939 esp
&= ~0xfLL
; /* align stack */
941 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
942 PUSHQ(esp
, env
->regs
[R_ESP
]);
943 PUSHQ(esp
, cpu_compute_eflags(env
));
944 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
946 if (has_error_code
) {
947 PUSHQ(esp
, error_code
);
950 /* interrupt gate clear IF mask */
951 if ((type
& 1) == 0) {
952 env
->eflags
&= ~IF_MASK
;
954 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
958 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
960 env
->regs
[R_ESP
] = esp
;
962 selector
= (selector
& ~3) | dpl
;
963 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
964 get_seg_base(e1
, e2
),
965 get_seg_limit(e1
, e2
),
972 #if defined(CONFIG_USER_ONLY)
973 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
975 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
977 cs
->exception_index
= EXCP_SYSCALL
;
978 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
982 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
986 if (!(env
->efer
& MSR_EFER_SCE
)) {
987 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
989 selector
= (env
->star
>> 32) & 0xffff;
990 if (env
->hflags
& HF_LMA_MASK
) {
993 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
994 env
->regs
[11] = cpu_compute_eflags(env
);
996 code64
= env
->hflags
& HF_CS64_MASK
;
998 env
->eflags
&= ~env
->fmask
;
999 cpu_load_eflags(env
, env
->eflags
, 0);
1000 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1002 DESC_G_MASK
| DESC_P_MASK
|
1004 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1006 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1008 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1010 DESC_W_MASK
| DESC_A_MASK
);
1012 env
->eip
= env
->lstar
;
1014 env
->eip
= env
->cstar
;
1017 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
1019 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1020 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1022 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1024 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1025 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1027 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1029 DESC_W_MASK
| DESC_A_MASK
);
1030 env
->eip
= (uint32_t)env
->star
;
1036 #ifdef TARGET_X86_64
1037 void helper_sysret(CPUX86State
*env
, int dflag
)
1041 if (!(env
->efer
& MSR_EFER_SCE
)) {
1042 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
1044 cpl
= env
->hflags
& HF_CPL_MASK
;
1045 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1046 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1048 selector
= (env
->star
>> 48) & 0xffff;
1049 if (env
->hflags
& HF_LMA_MASK
) {
1050 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1051 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1054 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1056 DESC_G_MASK
| DESC_P_MASK
|
1057 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1058 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1060 env
->eip
= env
->regs
[R_ECX
];
1062 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1064 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1065 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1066 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1067 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1069 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1071 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1072 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1073 DESC_W_MASK
| DESC_A_MASK
);
1075 env
->eflags
|= IF_MASK
;
1076 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1078 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1079 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1080 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1081 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1082 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1084 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1085 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1086 DESC_W_MASK
| DESC_A_MASK
);
1091 /* real mode interrupt */
1092 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1093 int error_code
, unsigned int next_eip
)
1096 target_ulong ptr
, ssp
;
1098 uint32_t offset
, esp
;
1099 uint32_t old_cs
, old_eip
;
1101 /* real mode (simpler!) */
1103 if (intno
* 4 + 3 > dt
->limit
) {
1104 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1106 ptr
= dt
->base
+ intno
* 4;
1107 offset
= cpu_lduw_kernel(env
, ptr
);
1108 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1109 esp
= env
->regs
[R_ESP
];
1110 ssp
= env
->segs
[R_SS
].base
;
1116 old_cs
= env
->segs
[R_CS
].selector
;
1117 /* XXX: use SS segment size? */
1118 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1119 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1120 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1122 /* update processor state */
1123 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1125 env
->segs
[R_CS
].selector
= selector
;
1126 env
->segs
[R_CS
].base
= (selector
<< 4);
1127 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1130 #if defined(CONFIG_USER_ONLY)
1131 /* fake user mode interrupt */
1132 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1133 int error_code
, target_ulong next_eip
)
1137 int dpl
, cpl
, shift
;
1141 if (env
->hflags
& HF_LMA_MASK
) {
1146 ptr
= dt
->base
+ (intno
<< shift
);
1147 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1149 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1150 cpl
= env
->hflags
& HF_CPL_MASK
;
1151 /* check privilege if software int */
1152 if (is_int
&& dpl
< cpl
) {
1153 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1156 /* Since we emulate only user space, we cannot do more than
1157 exiting the emulation with the suitable exception and error
1158 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1159 if (is_int
|| intno
== EXCP_SYSCALL
) {
1160 env
->eip
= next_eip
;
1166 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1167 int error_code
, int is_hw
, int rm
)
1169 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1170 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1171 control
.event_inj
));
1173 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1177 type
= SVM_EVTINJ_TYPE_SOFT
;
1179 type
= SVM_EVTINJ_TYPE_EXEPT
;
1181 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1182 if (!rm
&& exception_has_error_code(intno
)) {
1183 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1184 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1185 control
.event_inj_err
),
1189 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1196 * Begin execution of an interruption. is_int is TRUE if coming from
1197 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1198 * instruction. It is only relevant if is_int is TRUE.
1200 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1201 int error_code
, target_ulong next_eip
, int is_hw
)
1203 CPUX86State
*env
= &cpu
->env
;
1205 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1206 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1209 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1210 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1211 count
, intno
, error_code
, is_int
,
1212 env
->hflags
& HF_CPL_MASK
,
1213 env
->segs
[R_CS
].selector
, env
->eip
,
1214 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1215 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1216 if (intno
== 0x0e) {
1217 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1219 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1222 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1229 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1230 for (i
= 0; i
< 16; i
++) {
1231 qemu_log(" %02x", ldub(ptr
+ i
));
1239 if (env
->cr
[0] & CR0_PE_MASK
) {
1240 #if !defined(CONFIG_USER_ONLY)
1241 if (env
->hflags
& HF_SVMI_MASK
) {
1242 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1245 #ifdef TARGET_X86_64
1246 if (env
->hflags
& HF_LMA_MASK
) {
1247 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1251 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1255 #if !defined(CONFIG_USER_ONLY)
1256 if (env
->hflags
& HF_SVMI_MASK
) {
1257 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1260 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1263 #if !defined(CONFIG_USER_ONLY)
1264 if (env
->hflags
& HF_SVMI_MASK
) {
1265 CPUState
*cs
= CPU(cpu
);
1266 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+
1267 offsetof(struct vmcb
,
1268 control
.event_inj
));
1271 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1272 event_inj
& ~SVM_EVTINJ_VALID
);
1277 void x86_cpu_do_interrupt(CPUState
*cs
)
1279 X86CPU
*cpu
= X86_CPU(cs
);
1280 CPUX86State
*env
= &cpu
->env
;
1282 #if defined(CONFIG_USER_ONLY)
1283 /* if user mode only, we simulate a fake exception
1284 which will be handled outside the cpu execution
1286 do_interrupt_user(env
, cs
->exception_index
,
1287 env
->exception_is_int
,
1289 env
->exception_next_eip
);
1290 /* successfully delivered */
1291 env
->old_exception
= -1;
1293 /* simulate a real cpu exception. On i386, it can
1294 trigger new exceptions, but we do not handle
1295 double or triple faults yet. */
1296 do_interrupt_all(cpu
, cs
->exception_index
,
1297 env
->exception_is_int
,
1299 env
->exception_next_eip
, 0);
1300 /* successfully delivered */
1301 env
->old_exception
= -1;
1305 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1307 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1310 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1312 X86CPU
*cpu
= X86_CPU(cs
);
1313 CPUX86State
*env
= &cpu
->env
;
1316 #if !defined(CONFIG_USER_ONLY)
1317 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1318 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1319 apic_poll_irq(cpu
->apic_state
);
1320 /* Don't process multiple interrupt requests in a single call.
1321 This is required to make icount-driven execution deterministic. */
1325 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1327 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1328 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1329 !(env
->hflags
& HF_SMM_MASK
)) {
1330 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1331 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1334 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1335 !(env
->hflags2
& HF2_NMI_MASK
)) {
1336 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1337 env
->hflags2
|= HF2_NMI_MASK
;
1338 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1340 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1341 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1342 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1344 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1345 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1346 (env
->hflags2
& HF2_HIF_MASK
)) ||
1347 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1348 (env
->eflags
& IF_MASK
&&
1349 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1351 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1352 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1353 CPU_INTERRUPT_VIRQ
);
1354 intno
= cpu_get_pic_interrupt(env
);
1355 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1356 "Servicing hardware INT=0x%02x\n", intno
);
1357 do_interrupt_x86_hardirq(env
, intno
, 1);
1358 /* ensure that no TB jump will be modified as
1359 the program flow was changed */
1361 #if !defined(CONFIG_USER_ONLY)
1362 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1363 (env
->eflags
& IF_MASK
) &&
1364 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1366 /* FIXME: this should respect TPR */
1367 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1368 intno
= x86_ldl_phys(cs
, env
->vm_vmcb
1369 + offsetof(struct vmcb
, control
.int_vector
));
1370 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1371 "Servicing virtual hardware INT=0x%02x\n", intno
);
1372 do_interrupt_x86_hardirq(env
, intno
, 1);
1373 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1382 void helper_lldt(CPUX86State
*env
, int selector
)
1386 int index
, entry_limit
;
1390 if ((selector
& 0xfffc) == 0) {
1391 /* XXX: NULL selector case: invalid LDT */
1395 if (selector
& 0x4) {
1396 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1399 index
= selector
& ~7;
1400 #ifdef TARGET_X86_64
1401 if (env
->hflags
& HF_LMA_MASK
) {
1408 if ((index
+ entry_limit
) > dt
->limit
) {
1409 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1411 ptr
= dt
->base
+ index
;
1412 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1413 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1414 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1415 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1417 if (!(e2
& DESC_P_MASK
)) {
1418 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1420 #ifdef TARGET_X86_64
1421 if (env
->hflags
& HF_LMA_MASK
) {
1424 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1425 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1426 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1430 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1433 env
->ldt
.selector
= selector
;
1436 void helper_ltr(CPUX86State
*env
, int selector
)
1440 int index
, type
, entry_limit
;
1444 if ((selector
& 0xfffc) == 0) {
1445 /* NULL selector case: invalid TR */
1450 if (selector
& 0x4) {
1451 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1454 index
= selector
& ~7;
1455 #ifdef TARGET_X86_64
1456 if (env
->hflags
& HF_LMA_MASK
) {
1463 if ((index
+ entry_limit
) > dt
->limit
) {
1464 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1466 ptr
= dt
->base
+ index
;
1467 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1468 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1469 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1470 if ((e2
& DESC_S_MASK
) ||
1471 (type
!= 1 && type
!= 9)) {
1472 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1474 if (!(e2
& DESC_P_MASK
)) {
1475 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1477 #ifdef TARGET_X86_64
1478 if (env
->hflags
& HF_LMA_MASK
) {
1481 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1482 e4
= cpu_ldl_kernel_ra(env
, ptr
+ 12, GETPC());
1483 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1484 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1486 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1487 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1491 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1493 e2
|= DESC_TSS_BUSY_MASK
;
1494 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1496 env
->tr
.selector
= selector
;
1499 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1500 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1509 cpl
= env
->hflags
& HF_CPL_MASK
;
1510 if ((selector
& 0xfffc) == 0) {
1511 /* null selector case */
1513 #ifdef TARGET_X86_64
1514 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1517 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1519 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1522 if (selector
& 0x4) {
1527 index
= selector
& ~7;
1528 if ((index
+ 7) > dt
->limit
) {
1529 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1531 ptr
= dt
->base
+ index
;
1532 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1533 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1535 if (!(e2
& DESC_S_MASK
)) {
1536 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1539 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1540 if (seg_reg
== R_SS
) {
1541 /* must be writable segment */
1542 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1543 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1545 if (rpl
!= cpl
|| dpl
!= cpl
) {
1546 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1549 /* must be readable segment */
1550 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1551 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1554 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1555 /* if not conforming code, test rights */
1556 if (dpl
< cpl
|| dpl
< rpl
) {
1557 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1562 if (!(e2
& DESC_P_MASK
)) {
1563 if (seg_reg
== R_SS
) {
1564 raise_exception_err_ra(env
, EXCP0C_STACK
, selector
& 0xfffc, GETPC());
1566 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1570 /* set the access bit if not already set */
1571 if (!(e2
& DESC_A_MASK
)) {
1573 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1576 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1577 get_seg_base(e1
, e2
),
1578 get_seg_limit(e1
, e2
),
1581 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1582 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1587 /* protected mode jump */
1588 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1589 target_ulong next_eip
)
1592 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1594 if ((new_cs
& 0xfffc) == 0) {
1595 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1597 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1598 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1600 cpl
= env
->hflags
& HF_CPL_MASK
;
1601 if (e2
& DESC_S_MASK
) {
1602 if (!(e2
& DESC_CS_MASK
)) {
1603 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1605 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1606 if (e2
& DESC_C_MASK
) {
1607 /* conforming code segment */
1609 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1612 /* non conforming code segment */
1615 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1618 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1621 if (!(e2
& DESC_P_MASK
)) {
1622 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1624 limit
= get_seg_limit(e1
, e2
);
1625 if (new_eip
> limit
&&
1626 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1627 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1629 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1630 get_seg_base(e1
, e2
), limit
, e2
);
1633 /* jump to call or task gate */
1634 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1636 cpl
= env
->hflags
& HF_CPL_MASK
;
1637 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1639 case 1: /* 286 TSS */
1640 case 9: /* 386 TSS */
1641 case 5: /* task gate */
1642 if (dpl
< cpl
|| dpl
< rpl
) {
1643 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1645 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
, GETPC());
1647 case 4: /* 286 call gate */
1648 case 12: /* 386 call gate */
1649 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1650 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1652 if (!(e2
& DESC_P_MASK
)) {
1653 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1656 new_eip
= (e1
& 0xffff);
1658 new_eip
|= (e2
& 0xffff0000);
1660 if (load_segment_ra(env
, &e1
, &e2
, gate_cs
, GETPC()) != 0) {
1661 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1663 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1664 /* must be code segment */
1665 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1666 (DESC_S_MASK
| DESC_CS_MASK
))) {
1667 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1669 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1670 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1671 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1673 if (!(e2
& DESC_P_MASK
)) {
1674 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1676 limit
= get_seg_limit(e1
, e2
);
1677 if (new_eip
> limit
) {
1678 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1680 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1681 get_seg_base(e1
, e2
), limit
, e2
);
1685 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1691 /* real mode call */
1692 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1693 int shift
, int next_eip
)
1696 uint32_t esp
, esp_mask
;
1700 esp
= env
->regs
[R_ESP
];
1701 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1702 ssp
= env
->segs
[R_SS
].base
;
1704 PUSHL_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1705 PUSHL_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1707 PUSHW_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1708 PUSHW_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1711 SET_ESP(esp
, esp_mask
);
1713 env
->segs
[R_CS
].selector
= new_cs
;
1714 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1717 /* protected mode call */
1718 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1719 int shift
, target_ulong next_eip
)
1722 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1723 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1724 uint32_t val
, limit
, old_sp_mask
;
1725 target_ulong ssp
, old_ssp
;
1727 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1728 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1729 if ((new_cs
& 0xfffc) == 0) {
1730 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1732 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1733 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1735 cpl
= env
->hflags
& HF_CPL_MASK
;
1736 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1737 if (e2
& DESC_S_MASK
) {
1738 if (!(e2
& DESC_CS_MASK
)) {
1739 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1741 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1742 if (e2
& DESC_C_MASK
) {
1743 /* conforming code segment */
1745 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1748 /* non conforming code segment */
1751 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1754 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1757 if (!(e2
& DESC_P_MASK
)) {
1758 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1761 #ifdef TARGET_X86_64
1762 /* XXX: check 16/32 bit cases in long mode */
1767 rsp
= env
->regs
[R_ESP
];
1768 PUSHQ_RA(rsp
, env
->segs
[R_CS
].selector
, GETPC());
1769 PUSHQ_RA(rsp
, next_eip
, GETPC());
1770 /* from this point, not restartable */
1771 env
->regs
[R_ESP
] = rsp
;
1772 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1773 get_seg_base(e1
, e2
),
1774 get_seg_limit(e1
, e2
), e2
);
1779 sp
= env
->regs
[R_ESP
];
1780 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1781 ssp
= env
->segs
[R_SS
].base
;
1783 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1784 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1786 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1787 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1790 limit
= get_seg_limit(e1
, e2
);
1791 if (new_eip
> limit
) {
1792 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1794 /* from this point, not restartable */
1795 SET_ESP(sp
, sp_mask
);
1796 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1797 get_seg_base(e1
, e2
), limit
, e2
);
1801 /* check gate type */
1802 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1803 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1806 case 1: /* available 286 TSS */
1807 case 9: /* available 386 TSS */
1808 case 5: /* task gate */
1809 if (dpl
< cpl
|| dpl
< rpl
) {
1810 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1812 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
, GETPC());
1814 case 4: /* 286 call gate */
1815 case 12: /* 386 call gate */
1818 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1823 if (dpl
< cpl
|| dpl
< rpl
) {
1824 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1826 /* check valid bit */
1827 if (!(e2
& DESC_P_MASK
)) {
1828 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1830 selector
= e1
>> 16;
1831 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1832 param_count
= e2
& 0x1f;
1833 if ((selector
& 0xfffc) == 0) {
1834 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1837 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
1838 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1840 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1841 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1843 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1845 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1847 if (!(e2
& DESC_P_MASK
)) {
1848 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1851 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1852 /* to inner privilege */
1853 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
, GETPC());
1854 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1855 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1857 if ((ss
& 0xfffc) == 0) {
1858 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1860 if ((ss
& 3) != dpl
) {
1861 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1863 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, ss
, GETPC()) != 0) {
1864 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1866 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1867 if (ss_dpl
!= dpl
) {
1868 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1870 if (!(ss_e2
& DESC_S_MASK
) ||
1871 (ss_e2
& DESC_CS_MASK
) ||
1872 !(ss_e2
& DESC_W_MASK
)) {
1873 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1875 if (!(ss_e2
& DESC_P_MASK
)) {
1876 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1879 /* push_size = ((param_count * 2) + 8) << shift; */
1881 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1882 old_ssp
= env
->segs
[R_SS
].base
;
1884 sp_mask
= get_sp_mask(ss_e2
);
1885 ssp
= get_seg_base(ss_e1
, ss_e2
);
1887 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1888 PUSHL_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1889 for (i
= param_count
- 1; i
>= 0; i
--) {
1890 val
= cpu_ldl_kernel_ra(env
, old_ssp
+
1891 ((env
->regs
[R_ESP
] + i
* 4) &
1892 old_sp_mask
), GETPC());
1893 PUSHL_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1896 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1897 PUSHW_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1898 for (i
= param_count
- 1; i
>= 0; i
--) {
1899 val
= cpu_lduw_kernel_ra(env
, old_ssp
+
1900 ((env
->regs
[R_ESP
] + i
* 2) &
1901 old_sp_mask
), GETPC());
1902 PUSHW_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1907 /* to same privilege */
1908 sp
= env
->regs
[R_ESP
];
1909 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1910 ssp
= env
->segs
[R_SS
].base
;
1911 /* push_size = (4 << shift); */
1916 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1917 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1919 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1920 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1923 /* from this point, not restartable */
1926 ss
= (ss
& ~3) | dpl
;
1927 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1929 get_seg_limit(ss_e1
, ss_e2
),
1933 selector
= (selector
& ~3) | dpl
;
1934 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1935 get_seg_base(e1
, e2
),
1936 get_seg_limit(e1
, e2
),
1938 SET_ESP(sp
, sp_mask
);
1943 /* real and vm86 mode iret */
1944 void helper_iret_real(CPUX86State
*env
, int shift
)
1946 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
1950 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1951 sp
= env
->regs
[R_ESP
];
1952 ssp
= env
->segs
[R_SS
].base
;
1955 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
1956 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
1958 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
1961 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
1962 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
1963 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
1965 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
1966 env
->segs
[R_CS
].selector
= new_cs
;
1967 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1969 if (env
->eflags
& VM_MASK
) {
1970 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
1973 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
1977 eflags_mask
&= 0xffff;
1979 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
1980 env
->hflags2
&= ~HF2_NMI_MASK
;
1983 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
1988 /* XXX: on x86_64, we do not want to nullify FS and GS because
1989 they may still contain a valid base. I would be interested to
1990 know how a real x86_64 CPU behaves */
1991 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
1992 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
1996 e2
= env
->segs
[seg_reg
].flags
;
1997 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1998 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1999 /* data or non conforming code segment */
2001 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

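/*
 * Rough sketch of the frame consumed above (protected mode, is_iret case,
 * innermost values first): EIP, CS, EFLAGS, and, only when returning to an
 * outer privilege level, ESP and SS.  For lret (is_iret == 0) the EFLAGS
 * slot is absent and 'addend' extra bytes of call arguments are skipped.
 * Slot width follows 'shift': 2 bytes (0), 4 bytes (1) or 8 bytes (2).
 */
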
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

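/*
 * When NT is set, IRET is a task return: the previous task's TSS selector
 * sits in the back-link field at offset 0 of the current TSS, which is why
 * it is read from env->tr.base + 0 above before switching back to it.
 */
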
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

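/*
 * Note on the selector arithmetic: SYSENTER/SYSEXIT derive all selectors
 * from IA32_SYSENTER_CS.  SYSENTER uses CS = sysenter_cs and
 * SS = sysenter_cs + 8; SYSEXIT returns to CS = sysenter_cs + 16 and
 * SS = sysenter_cs + 24 (or + 32 / + 40 for a 64-bit return), always with
 * RPL forced to 3 and flat 4 GiB segments, which matches the loads above.
 */
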
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

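/*
 * Both helpers above model the LSL/LAR success indication through the ZF
 * bit: QEMU keeps eflags lazily, so the current flags are materialised with
 * cpu_cc_compute_all() and then written back into CC_SRC with CC_Z either
 * cleared (selector not accessible) or set (limit/access rights returned).
 */
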
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

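/*
 * VERR/VERW only report whether the selector is readable/writable at the
 * current CPL and requested RPL; like LSL/LAR above they never fault on a
 * bad selector and simply leave the answer in ZF via CC_SRC.
 */
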
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}

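/*
 * Worked example of the bitmap lookup above (illustrative values): for an
 * OUT to port 0x3f9 with size 2, io_offset picks up the bitmap base from
 * TSS offset 0x66 and then adds 0x3f9 >> 3 = 0x7f.  The 16-bit word read
 * from there is shifted right by 0x3f9 & 7 = 1 and masked with
 * (1 << 2) - 1 = 3; if either of those two bits is set the access raises
 * #GP(0).
 */
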
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}