2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
40 #ifdef CONFIG_USER_ONLY
41 #define MEMSUFFIX _kernel
43 #include "exec/cpu_ldst_useronly_template.h"
46 #include "exec/cpu_ldst_useronly_template.h"
49 #include "exec/cpu_ldst_useronly_template.h"
52 #include "exec/cpu_ldst_useronly_template.h"
55 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
56 #define MEMSUFFIX _kernel
58 #include "exec/cpu_ldst_template.h"
61 #include "exec/cpu_ldst_template.h"
64 #include "exec/cpu_ldst_template.h"
67 #include "exec/cpu_ldst_template.h"
72 /* return non zero if error */
73 static inline int load_segment_ra(CPUX86State
*env
, uint32_t *e1_ptr
,
74 uint32_t *e2_ptr
, int selector
,
86 index
= selector
& ~7;
87 if ((index
+ 7) > dt
->limit
) {
90 ptr
= dt
->base
+ index
;
91 *e1_ptr
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
92 *e2_ptr
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
96 static inline int load_segment(CPUX86State
*env
, uint32_t *e1_ptr
,
97 uint32_t *e2_ptr
, int selector
)
99 return load_segment_ra(env
, e1_ptr
, e2_ptr
, selector
, 0);
102 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
106 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
107 if (e2
& DESC_G_MASK
) {
108 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across the descriptor:
 * e1[31:16] -> base[15:0], e2[7:0] -> base[23:16], e2[31:24] -> base[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
118 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
,
121 sc
->base
= get_seg_base(e1
, e2
);
122 sc
->limit
= get_seg_limit(e1
, e2
);
126 /* init the segment cache in vm86 mode. */
127 static inline void load_seg_vm(CPUX86State
*env
, int seg
, int selector
)
131 cpu_x86_load_seg_cache(env
, seg
, selector
, (selector
<< 4), 0xffff,
132 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
133 DESC_A_MASK
| (3 << DESC_DPL_SHIFT
));
136 static inline void get_ss_esp_from_tss(CPUX86State
*env
, uint32_t *ss_ptr
,
137 uint32_t *esp_ptr
, int dpl
,
140 X86CPU
*cpu
= x86_env_get_cpu(env
);
141 int type
, index
, shift
;
146 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
147 for (i
= 0; i
< env
->tr
.limit
; i
++) {
148 printf("%02x ", env
->tr
.base
[i
]);
157 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
158 cpu_abort(CPU(cpu
), "invalid tss");
160 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
161 if ((type
& 7) != 1) {
162 cpu_abort(CPU(cpu
), "invalid tss type");
165 index
= (dpl
* 4 + 2) << shift
;
166 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
) {
167 raise_exception_err_ra(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc, retaddr
);
170 *esp_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
171 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 2, retaddr
);
173 *esp_ptr
= cpu_ldl_kernel_ra(env
, env
->tr
.base
+ index
, retaddr
);
174 *ss_ptr
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ index
+ 4, retaddr
);
178 static void tss_load_seg(CPUX86State
*env
, int seg_reg
, int selector
, int cpl
,
184 if ((selector
& 0xfffc) != 0) {
185 if (load_segment_ra(env
, &e1
, &e2
, selector
, retaddr
) != 0) {
186 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
188 if (!(e2
& DESC_S_MASK
)) {
189 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
192 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
193 if (seg_reg
== R_CS
) {
194 if (!(e2
& DESC_CS_MASK
)) {
195 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
198 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
200 } else if (seg_reg
== R_SS
) {
201 /* SS must be writable data */
202 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
203 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
205 if (dpl
!= cpl
|| dpl
!= rpl
) {
206 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
209 /* not readable code */
210 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
)) {
211 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
213 /* if data or non conforming code, checks the rights */
214 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
215 if (dpl
< cpl
|| dpl
< rpl
) {
216 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
220 if (!(e2
& DESC_P_MASK
)) {
221 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, retaddr
);
223 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
224 get_seg_base(e1
, e2
),
225 get_seg_limit(e1
, e2
),
228 if (seg_reg
== R_SS
|| seg_reg
== R_CS
) {
229 raise_exception_err_ra(env
, EXCP0A_TSS
, selector
& 0xfffc, retaddr
);
234 #define SWITCH_TSS_JMP 0
235 #define SWITCH_TSS_IRET 1
236 #define SWITCH_TSS_CALL 2
238 /* XXX: restore CPU state in registers (PowerPC case) */
239 static void switch_tss_ra(CPUX86State
*env
, int tss_selector
,
240 uint32_t e1
, uint32_t e2
, int source
,
241 uint32_t next_eip
, uintptr_t retaddr
)
243 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
244 target_ulong tss_base
;
245 uint32_t new_regs
[8], new_segs
[6];
246 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
247 uint32_t old_eflags
, eflags_mask
;
252 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
253 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
,
256 /* if task gate, we read the TSS segment and we load it */
258 if (!(e2
& DESC_P_MASK
)) {
259 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
261 tss_selector
= e1
>> 16;
262 if (tss_selector
& 4) {
263 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
265 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, retaddr
) != 0) {
266 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
268 if (e2
& DESC_S_MASK
) {
269 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
271 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
272 if ((type
& 7) != 1) {
273 raise_exception_err_ra(env
, EXCP0D_GPF
, tss_selector
& 0xfffc, retaddr
);
277 if (!(e2
& DESC_P_MASK
)) {
278 raise_exception_err_ra(env
, EXCP0B_NOSEG
, tss_selector
& 0xfffc, retaddr
);
286 tss_limit
= get_seg_limit(e1
, e2
);
287 tss_base
= get_seg_base(e1
, e2
);
288 if ((tss_selector
& 4) != 0 ||
289 tss_limit
< tss_limit_max
) {
290 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, retaddr
);
292 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
294 old_tss_limit_max
= 103;
296 old_tss_limit_max
= 43;
299 /* read all the registers from the new TSS */
302 new_cr3
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x1c, retaddr
);
303 new_eip
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x20, retaddr
);
304 new_eflags
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x24, retaddr
);
305 for (i
= 0; i
< 8; i
++) {
306 new_regs
[i
] = cpu_ldl_kernel_ra(env
, tss_base
+ (0x28 + i
* 4),
309 for (i
= 0; i
< 6; i
++) {
310 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x48 + i
* 4),
313 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x60, retaddr
);
314 new_trap
= cpu_ldl_kernel_ra(env
, tss_base
+ 0x64, retaddr
);
318 new_eip
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x0e, retaddr
);
319 new_eflags
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x10, retaddr
);
320 for (i
= 0; i
< 8; i
++) {
321 new_regs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x12 + i
* 2),
322 retaddr
) | 0xffff0000;
324 for (i
= 0; i
< 4; i
++) {
325 new_segs
[i
] = cpu_lduw_kernel_ra(env
, tss_base
+ (0x22 + i
* 4),
328 new_ldt
= cpu_lduw_kernel_ra(env
, tss_base
+ 0x2a, retaddr
);
333 /* XXX: avoid a compiler warning, see
334 http://support.amd.com/us/Processor_TechDocs/24593.pdf
335 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
338 /* NOTE: we must avoid memory exceptions during the task switch,
339 so we make dummy accesses before */
340 /* XXX: it can still fail in some cases, so a bigger hack is
341 necessary to valid the TLB after having done the accesses */
343 v1
= cpu_ldub_kernel_ra(env
, env
->tr
.base
, retaddr
);
344 v2
= cpu_ldub_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, retaddr
);
345 cpu_stb_kernel_ra(env
, env
->tr
.base
, v1
, retaddr
);
346 cpu_stb_kernel_ra(env
, env
->tr
.base
+ old_tss_limit_max
, v2
, retaddr
);
348 /* clear busy bit (it is restartable) */
349 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
353 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
354 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
355 e2
&= ~DESC_TSS_BUSY_MASK
;
356 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
358 old_eflags
= cpu_compute_eflags(env
);
359 if (source
== SWITCH_TSS_IRET
) {
360 old_eflags
&= ~NT_MASK
;
363 /* save the current state in the old TSS */
366 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x20, next_eip
, retaddr
);
367 cpu_stl_kernel_ra(env
, env
->tr
.base
+ 0x24, old_eflags
, retaddr
);
368 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
], retaddr
);
369 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
], retaddr
);
370 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
], retaddr
);
371 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
], retaddr
);
372 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
], retaddr
);
373 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
], retaddr
);
374 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
], retaddr
);
375 cpu_stl_kernel_ra(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
], retaddr
);
376 for (i
= 0; i
< 6; i
++) {
377 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x48 + i
* 4),
378 env
->segs
[i
].selector
, retaddr
);
382 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x0e, next_eip
, retaddr
);
383 cpu_stw_kernel_ra(env
, env
->tr
.base
+ 0x10, old_eflags
, retaddr
);
384 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
], retaddr
);
385 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
], retaddr
);
386 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
], retaddr
);
387 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
], retaddr
);
388 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
], retaddr
);
389 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
], retaddr
);
390 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
], retaddr
);
391 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
], retaddr
);
392 for (i
= 0; i
< 4; i
++) {
393 cpu_stw_kernel_ra(env
, env
->tr
.base
+ (0x22 + i
* 4),
394 env
->segs
[i
].selector
, retaddr
);
398 /* now if an exception occurs, it will occurs in the next task
401 if (source
== SWITCH_TSS_CALL
) {
402 cpu_stw_kernel_ra(env
, tss_base
, env
->tr
.selector
, retaddr
);
403 new_eflags
|= NT_MASK
;
407 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
411 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
412 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
413 e2
|= DESC_TSS_BUSY_MASK
;
414 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, retaddr
);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env
->cr
[0] |= CR0_TS_MASK
;
420 env
->hflags
|= HF_TS_MASK
;
421 env
->tr
.selector
= tss_selector
;
422 env
->tr
.base
= tss_base
;
423 env
->tr
.limit
= tss_limit
;
424 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
426 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
427 cpu_x86_update_cr3(env
, new_cr3
);
430 /* load all registers without an exception, then reload them with
431 possible exception */
433 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
434 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
436 eflags_mask
&= 0xffff;
438 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
439 /* XXX: what to do in 16 bit case? */
440 env
->regs
[R_EAX
] = new_regs
[0];
441 env
->regs
[R_ECX
] = new_regs
[1];
442 env
->regs
[R_EDX
] = new_regs
[2];
443 env
->regs
[R_EBX
] = new_regs
[3];
444 env
->regs
[R_ESP
] = new_regs
[4];
445 env
->regs
[R_EBP
] = new_regs
[5];
446 env
->regs
[R_ESI
] = new_regs
[6];
447 env
->regs
[R_EDI
] = new_regs
[7];
448 if (new_eflags
& VM_MASK
) {
449 for (i
= 0; i
< 6; i
++) {
450 load_seg_vm(env
, i
, new_segs
[i
]);
453 /* first just selectors as the rest may trigger exceptions */
454 for (i
= 0; i
< 6; i
++) {
455 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
459 env
->ldt
.selector
= new_ldt
& ~4;
466 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
469 if ((new_ldt
& 0xfffc) != 0) {
471 index
= new_ldt
& ~7;
472 if ((index
+ 7) > dt
->limit
) {
473 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
475 ptr
= dt
->base
+ index
;
476 e1
= cpu_ldl_kernel_ra(env
, ptr
, retaddr
);
477 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, retaddr
);
478 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
479 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
481 if (!(e2
& DESC_P_MASK
)) {
482 raise_exception_err_ra(env
, EXCP0A_TSS
, new_ldt
& 0xfffc, retaddr
);
484 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
487 /* load the segments */
488 if (!(new_eflags
& VM_MASK
)) {
489 int cpl
= new_segs
[R_CS
] & 3;
490 tss_load_seg(env
, R_CS
, new_segs
[R_CS
], cpl
, retaddr
);
491 tss_load_seg(env
, R_SS
, new_segs
[R_SS
], cpl
, retaddr
);
492 tss_load_seg(env
, R_ES
, new_segs
[R_ES
], cpl
, retaddr
);
493 tss_load_seg(env
, R_DS
, new_segs
[R_DS
], cpl
, retaddr
);
494 tss_load_seg(env
, R_FS
, new_segs
[R_FS
], cpl
, retaddr
);
495 tss_load_seg(env
, R_GS
, new_segs
[R_GS
], cpl
, retaddr
);
498 /* check that env->eip is in the CS segment limits */
499 if (new_eip
> env
->segs
[R_CS
].limit
) {
500 /* XXX: different exception if CALL? */
501 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
504 #ifndef CONFIG_USER_ONLY
505 /* reset local breakpoints */
506 if (env
->dr
[7] & DR7_LOCAL_BP_MASK
) {
507 cpu_x86_update_dr7(env
, env
->dr
[7] & ~DR7_LOCAL_BP_MASK
);
512 static void switch_tss(CPUX86State
*env
, int tss_selector
,
513 uint32_t e1
, uint32_t e2
, int source
,
516 switch_tss_ra(env
, tss_selector
, e1
, e2
, source
, next_eip
, 0);
519 static inline unsigned int get_sp_mask(unsigned int e2
)
521 if (e2
& DESC_B_MASK
) {
/* Whether exception vector INTNO pushes an error code on the stack:
 * #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17).
 * NOTE(review): body reconstructed — the vector list was elided from
 * the visible text; confirm against the architecture manual. */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
544 #define SET_ESP(val, sp_mask) \
546 if ((sp_mask) == 0xffff) { \
547 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
549 } else if ((sp_mask) == 0xffffffffLL) { \
550 env->regs[R_ESP] = (uint32_t)(val); \
552 env->regs[R_ESP] = (val); \
556 #define SET_ESP(val, sp_mask) \
558 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
559 ((val) & (sp_mask)); \
563 /* in 64-bit machines, this can overflow. So this segment addition macro
564 * can be used to trim the value to 32-bit whenever needed */
565 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
567 /* XXX: add a is_user flag to have proper security support */
568 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
571 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
574 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
577 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
580 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
582 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
586 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
588 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
592 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
593 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
594 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
595 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
597 /* protected mode interrupt */
598 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
599 int error_code
, unsigned int next_eip
,
603 target_ulong ptr
, ssp
;
604 int type
, dpl
, selector
, ss_dpl
, cpl
;
605 int has_error_code
, new_stack
, shift
;
606 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
607 uint32_t old_eip
, sp_mask
;
608 int vm86
= env
->eflags
& VM_MASK
;
611 if (!is_int
&& !is_hw
) {
612 has_error_code
= exception_has_error_code(intno
);
621 if (intno
* 8 + 7 > dt
->limit
) {
622 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
624 ptr
= dt
->base
+ intno
* 8;
625 e1
= cpu_ldl_kernel(env
, ptr
);
626 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
627 /* check gate type */
628 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
630 case 5: /* task gate */
631 /* must do that check here to return the correct error code */
632 if (!(e2
& DESC_P_MASK
)) {
633 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
635 switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
636 if (has_error_code
) {
640 /* push the error code */
641 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
643 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
648 esp
= (env
->regs
[R_ESP
] - (2 << shift
)) & mask
;
649 ssp
= env
->segs
[R_SS
].base
+ esp
;
651 cpu_stl_kernel(env
, ssp
, error_code
);
653 cpu_stw_kernel(env
, ssp
, error_code
);
658 case 6: /* 286 interrupt gate */
659 case 7: /* 286 trap gate */
660 case 14: /* 386 interrupt gate */
661 case 15: /* 386 trap gate */
664 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
667 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
668 cpl
= env
->hflags
& HF_CPL_MASK
;
669 /* check privilege if software int */
670 if (is_int
&& dpl
< cpl
) {
671 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
673 /* check valid bit */
674 if (!(e2
& DESC_P_MASK
)) {
675 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
678 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
679 if ((selector
& 0xfffc) == 0) {
680 raise_exception_err(env
, EXCP0D_GPF
, 0);
682 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
683 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
685 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
686 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
688 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
690 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
692 if (!(e2
& DESC_P_MASK
)) {
693 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
695 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
696 /* to inner privilege */
697 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
, 0);
698 if ((ss
& 0xfffc) == 0) {
699 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
701 if ((ss
& 3) != dpl
) {
702 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
704 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
705 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
707 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
709 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
711 if (!(ss_e2
& DESC_S_MASK
) ||
712 (ss_e2
& DESC_CS_MASK
) ||
713 !(ss_e2
& DESC_W_MASK
)) {
714 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
716 if (!(ss_e2
& DESC_P_MASK
)) {
717 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
720 sp_mask
= get_sp_mask(ss_e2
);
721 ssp
= get_seg_base(ss_e1
, ss_e2
);
722 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
723 /* to same privilege */
725 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
728 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
729 ssp
= env
->segs
[R_SS
].base
;
730 esp
= env
->regs
[R_ESP
];
733 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
734 new_stack
= 0; /* avoid warning */
735 sp_mask
= 0; /* avoid warning */
736 ssp
= 0; /* avoid warning */
737 esp
= 0; /* avoid warning */
743 /* XXX: check that enough room is available */
744 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
753 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
754 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
755 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
756 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
758 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
759 PUSHL(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
761 PUSHL(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
762 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
763 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
764 if (has_error_code
) {
765 PUSHL(ssp
, esp
, sp_mask
, error_code
);
770 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
771 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
772 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
773 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
775 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
776 PUSHW(ssp
, esp
, sp_mask
, env
->regs
[R_ESP
]);
778 PUSHW(ssp
, esp
, sp_mask
, cpu_compute_eflags(env
));
779 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
780 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
781 if (has_error_code
) {
782 PUSHW(ssp
, esp
, sp_mask
, error_code
);
786 /* interrupt gate clear IF mask */
787 if ((type
& 1) == 0) {
788 env
->eflags
&= ~IF_MASK
;
790 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
794 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
799 ss
= (ss
& ~3) | dpl
;
800 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
801 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
803 SET_ESP(esp
, sp_mask
);
805 selector
= (selector
& ~3) | dpl
;
806 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
807 get_seg_base(e1
, e2
),
808 get_seg_limit(e1
, e2
),
815 #define PUSHQ_RA(sp, val, ra) \
818 cpu_stq_kernel_ra(env, sp, (val), ra); \
821 #define POPQ_RA(sp, val, ra) \
823 val = cpu_ldq_kernel_ra(env, sp, ra); \
827 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
828 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
830 static inline target_ulong
get_rsp_from_tss(CPUX86State
*env
, int level
)
832 X86CPU
*cpu
= x86_env_get_cpu(env
);
836 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
837 env
->tr
.base
, env
->tr
.limit
);
840 if (!(env
->tr
.flags
& DESC_P_MASK
)) {
841 cpu_abort(CPU(cpu
), "invalid tss");
843 index
= 8 * level
+ 4;
844 if ((index
+ 7) > env
->tr
.limit
) {
845 raise_exception_err(env
, EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
847 return cpu_ldq_kernel(env
, env
->tr
.base
+ index
);
850 /* 64 bit interrupt */
851 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
852 int error_code
, target_ulong next_eip
, int is_hw
)
856 int type
, dpl
, selector
, cpl
, ist
;
857 int has_error_code
, new_stack
;
858 uint32_t e1
, e2
, e3
, ss
;
859 target_ulong old_eip
, esp
, offset
;
862 if (!is_int
&& !is_hw
) {
863 has_error_code
= exception_has_error_code(intno
);
872 if (intno
* 16 + 15 > dt
->limit
) {
873 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
875 ptr
= dt
->base
+ intno
* 16;
876 e1
= cpu_ldl_kernel(env
, ptr
);
877 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
878 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
879 /* check gate type */
880 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
882 case 14: /* 386 interrupt gate */
883 case 15: /* 386 trap gate */
886 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
889 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
890 cpl
= env
->hflags
& HF_CPL_MASK
;
891 /* check privilege if software int */
892 if (is_int
&& dpl
< cpl
) {
893 raise_exception_err(env
, EXCP0D_GPF
, intno
* 16 + 2);
895 /* check valid bit */
896 if (!(e2
& DESC_P_MASK
)) {
897 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 16 + 2);
900 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
902 if ((selector
& 0xfffc) == 0) {
903 raise_exception_err(env
, EXCP0D_GPF
, 0);
906 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
907 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
909 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
910 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
912 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
914 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
916 if (!(e2
& DESC_P_MASK
)) {
917 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
919 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
920 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
922 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
923 /* to inner privilege */
925 esp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
927 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
928 /* to same privilege */
929 if (env
->eflags
& VM_MASK
) {
930 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
933 esp
= env
->regs
[R_ESP
];
936 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
937 new_stack
= 0; /* avoid warning */
938 esp
= 0; /* avoid warning */
940 esp
&= ~0xfLL
; /* align stack */
942 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
943 PUSHQ(esp
, env
->regs
[R_ESP
]);
944 PUSHQ(esp
, cpu_compute_eflags(env
));
945 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
947 if (has_error_code
) {
948 PUSHQ(esp
, error_code
);
951 /* interrupt gate clear IF mask */
952 if ((type
& 1) == 0) {
953 env
->eflags
&= ~IF_MASK
;
955 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
959 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
961 env
->regs
[R_ESP
] = esp
;
963 selector
= (selector
& ~3) | dpl
;
964 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
965 get_seg_base(e1
, e2
),
966 get_seg_limit(e1
, e2
),
973 #if defined(CONFIG_USER_ONLY)
974 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
976 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
978 cs
->exception_index
= EXCP_SYSCALL
;
979 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
983 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
987 if (!(env
->efer
& MSR_EFER_SCE
)) {
988 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
990 selector
= (env
->star
>> 32) & 0xffff;
991 if (env
->hflags
& HF_LMA_MASK
) {
994 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
995 env
->regs
[11] = cpu_compute_eflags(env
);
997 code64
= env
->hflags
& HF_CS64_MASK
;
999 env
->eflags
&= ~env
->fmask
;
1000 cpu_load_eflags(env
, env
->eflags
, 0);
1001 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1003 DESC_G_MASK
| DESC_P_MASK
|
1005 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1007 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1009 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1011 DESC_W_MASK
| DESC_A_MASK
);
1013 env
->eip
= env
->lstar
;
1015 env
->eip
= env
->cstar
;
1018 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
1020 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1021 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1023 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1025 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1026 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1028 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1030 DESC_W_MASK
| DESC_A_MASK
);
1031 env
->eip
= (uint32_t)env
->star
;
1037 #ifdef TARGET_X86_64
1038 void helper_sysret(CPUX86State
*env
, int dflag
)
1042 if (!(env
->efer
& MSR_EFER_SCE
)) {
1043 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
1045 cpl
= env
->hflags
& HF_CPL_MASK
;
1046 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1047 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1049 selector
= (env
->star
>> 48) & 0xffff;
1050 if (env
->hflags
& HF_LMA_MASK
) {
1051 cpu_load_eflags(env
, (uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
1052 | ID_MASK
| IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
|
1055 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1057 DESC_G_MASK
| DESC_P_MASK
|
1058 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1059 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1061 env
->eip
= env
->regs
[R_ECX
];
1063 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1065 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1066 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1067 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1068 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1070 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1072 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1073 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1074 DESC_W_MASK
| DESC_A_MASK
);
1076 env
->eflags
|= IF_MASK
;
1077 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1079 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1080 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1081 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1082 env
->eip
= (uint32_t)env
->regs
[R_ECX
];
1083 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) | 3,
1085 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1086 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1087 DESC_W_MASK
| DESC_A_MASK
);
1092 /* real mode interrupt */
1093 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1094 int error_code
, unsigned int next_eip
)
1097 target_ulong ptr
, ssp
;
1099 uint32_t offset
, esp
;
1100 uint32_t old_cs
, old_eip
;
1102 /* real mode (simpler!) */
1104 if (intno
* 4 + 3 > dt
->limit
) {
1105 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1107 ptr
= dt
->base
+ intno
* 4;
1108 offset
= cpu_lduw_kernel(env
, ptr
);
1109 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1110 esp
= env
->regs
[R_ESP
];
1111 ssp
= env
->segs
[R_SS
].base
;
1117 old_cs
= env
->segs
[R_CS
].selector
;
1118 /* XXX: use SS segment size? */
1119 PUSHW(ssp
, esp
, 0xffff, cpu_compute_eflags(env
));
1120 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1121 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1123 /* update processor state */
1124 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
1126 env
->segs
[R_CS
].selector
= selector
;
1127 env
->segs
[R_CS
].base
= (selector
<< 4);
1128 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1131 #if defined(CONFIG_USER_ONLY)
1132 /* fake user mode interrupt. is_int is TRUE if coming from the int
1133 * instruction. next_eip is the env->eip value AFTER the interrupt
1134 * instruction. It is only relevant if is_int is TRUE or if intno
1137 static void do_interrupt_user(CPUX86State
*env
, int intno
, int is_int
,
1138 int error_code
, target_ulong next_eip
)
1143 int dpl
, cpl
, shift
;
1147 if (env
->hflags
& HF_LMA_MASK
) {
1152 ptr
= dt
->base
+ (intno
<< shift
);
1153 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
1155 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1156 cpl
= env
->hflags
& HF_CPL_MASK
;
1157 /* check privilege if software int */
1159 raise_exception_err(env
, EXCP0D_GPF
, (intno
<< shift
) + 2);
1163 /* Since we emulate only user space, we cannot do more than
1164 exiting the emulation with the suitable exception and error
1165 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1166 if (is_int
|| intno
== EXCP_SYSCALL
) {
1167 env
->eip
= next_eip
;
1173 static void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
1174 int error_code
, int is_hw
, int rm
)
1176 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
1177 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1178 control
.event_inj
));
1180 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1184 type
= SVM_EVTINJ_TYPE_SOFT
;
1186 type
= SVM_EVTINJ_TYPE_EXEPT
;
1188 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1189 if (!rm
&& exception_has_error_code(intno
)) {
1190 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1191 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
1192 control
.event_inj_err
),
1196 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1203 * Begin execution of an interruption. is_int is TRUE if coming from
1204 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1205 * instruction. It is only relevant if is_int is TRUE.
1207 static void do_interrupt_all(X86CPU
*cpu
, int intno
, int is_int
,
1208 int error_code
, target_ulong next_eip
, int is_hw
)
1210 CPUX86State
*env
= &cpu
->env
;
1212 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1213 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1216 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1217 " pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1218 count
, intno
, error_code
, is_int
,
1219 env
->hflags
& HF_CPL_MASK
,
1220 env
->segs
[R_CS
].selector
, env
->eip
,
1221 (int)env
->segs
[R_CS
].base
+ env
->eip
,
1222 env
->segs
[R_SS
].selector
, env
->regs
[R_ESP
]);
1223 if (intno
== 0x0e) {
1224 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1226 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx
, env
->regs
[R_EAX
]);
1229 log_cpu_state(CPU(cpu
), CPU_DUMP_CCOP
);
1236 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1237 for (i
= 0; i
< 16; i
++) {
1238 qemu_log(" %02x", ldub(ptr
+ i
));
1246 if (env
->cr
[0] & CR0_PE_MASK
) {
1247 #if !defined(CONFIG_USER_ONLY)
1248 if (env
->hflags
& HF_SVMI_MASK
) {
1249 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 0);
1252 #ifdef TARGET_X86_64
1253 if (env
->hflags
& HF_LMA_MASK
) {
1254 do_interrupt64(env
, intno
, is_int
, error_code
, next_eip
, is_hw
);
1258 do_interrupt_protected(env
, intno
, is_int
, error_code
, next_eip
,
1262 #if !defined(CONFIG_USER_ONLY)
1263 if (env
->hflags
& HF_SVMI_MASK
) {
1264 handle_even_inj(env
, intno
, is_int
, error_code
, is_hw
, 1);
1267 do_interrupt_real(env
, intno
, is_int
, error_code
, next_eip
);
1270 #if !defined(CONFIG_USER_ONLY)
1271 if (env
->hflags
& HF_SVMI_MASK
) {
1272 CPUState
*cs
= CPU(cpu
);
1273 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+
1274 offsetof(struct vmcb
,
1275 control
.event_inj
));
1278 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
1279 event_inj
& ~SVM_EVTINJ_VALID
);
1284 void x86_cpu_do_interrupt(CPUState
*cs
)
1286 X86CPU
*cpu
= X86_CPU(cs
);
1287 CPUX86State
*env
= &cpu
->env
;
1289 #if defined(CONFIG_USER_ONLY)
1290 /* if user mode only, we simulate a fake exception
1291 which will be handled outside the cpu execution
1293 do_interrupt_user(env
, cs
->exception_index
,
1294 env
->exception_is_int
,
1296 env
->exception_next_eip
);
1297 /* successfully delivered */
1298 env
->old_exception
= -1;
1300 /* simulate a real cpu exception. On i386, it can
1301 trigger new exceptions, but we do not handle
1302 double or triple faults yet. */
1303 do_interrupt_all(cpu
, cs
->exception_index
,
1304 env
->exception_is_int
,
1306 env
->exception_next_eip
, 0);
1307 /* successfully delivered */
1308 env
->old_exception
= -1;
1312 void do_interrupt_x86_hardirq(CPUX86State
*env
, int intno
, int is_hw
)
1314 do_interrupt_all(x86_env_get_cpu(env
), intno
, 0, 0, 0, is_hw
);
1317 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
1319 X86CPU
*cpu
= X86_CPU(cs
);
1320 CPUX86State
*env
= &cpu
->env
;
1323 #if !defined(CONFIG_USER_ONLY)
1324 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
1325 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
1326 apic_poll_irq(cpu
->apic_state
);
1327 /* Don't process multiple interrupt requests in a single call.
1328 This is required to make icount-driven execution deterministic. */
1332 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
1334 } else if (env
->hflags2
& HF2_GIF_MASK
) {
1335 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
1336 !(env
->hflags
& HF_SMM_MASK
)) {
1337 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0);
1338 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
1341 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
1342 !(env
->hflags2
& HF2_NMI_MASK
)) {
1343 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
1344 env
->hflags2
|= HF2_NMI_MASK
;
1345 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
1347 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
1348 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
1349 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
1351 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
1352 (((env
->hflags2
& HF2_VINTR_MASK
) &&
1353 (env
->hflags2
& HF2_HIF_MASK
)) ||
1354 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
1355 (env
->eflags
& IF_MASK
&&
1356 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
1358 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0);
1359 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
1360 CPU_INTERRUPT_VIRQ
);
1361 intno
= cpu_get_pic_interrupt(env
);
1362 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1363 "Servicing hardware INT=0x%02x\n", intno
);
1364 do_interrupt_x86_hardirq(env
, intno
, 1);
1365 /* ensure that no TB jump will be modified as
1366 the program flow was changed */
1368 #if !defined(CONFIG_USER_ONLY)
1369 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
1370 (env
->eflags
& IF_MASK
) &&
1371 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
1373 /* FIXME: this should respect TPR */
1374 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0);
1375 intno
= x86_ldl_phys(cs
, env
->vm_vmcb
1376 + offsetof(struct vmcb
, control
.int_vector
));
1377 qemu_log_mask(CPU_LOG_TB_IN_ASM
,
1378 "Servicing virtual hardware INT=0x%02x\n", intno
);
1379 do_interrupt_x86_hardirq(env
, intno
, 1);
1380 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
1389 void helper_lldt(CPUX86State
*env
, int selector
)
1393 int index
, entry_limit
;
1397 if ((selector
& 0xfffc) == 0) {
1398 /* XXX: NULL selector case: invalid LDT */
1402 if (selector
& 0x4) {
1403 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1406 index
= selector
& ~7;
1407 #ifdef TARGET_X86_64
1408 if (env
->hflags
& HF_LMA_MASK
) {
1415 if ((index
+ entry_limit
) > dt
->limit
) {
1416 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1418 ptr
= dt
->base
+ index
;
1419 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1420 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1421 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2) {
1422 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1424 if (!(e2
& DESC_P_MASK
)) {
1425 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1427 #ifdef TARGET_X86_64
1428 if (env
->hflags
& HF_LMA_MASK
) {
1431 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1432 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1433 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1437 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1440 env
->ldt
.selector
= selector
;
1443 void helper_ltr(CPUX86State
*env
, int selector
)
1447 int index
, type
, entry_limit
;
1451 if ((selector
& 0xfffc) == 0) {
1452 /* NULL selector case: invalid TR */
1457 if (selector
& 0x4) {
1458 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1461 index
= selector
& ~7;
1462 #ifdef TARGET_X86_64
1463 if (env
->hflags
& HF_LMA_MASK
) {
1470 if ((index
+ entry_limit
) > dt
->limit
) {
1471 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1473 ptr
= dt
->base
+ index
;
1474 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1475 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1476 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1477 if ((e2
& DESC_S_MASK
) ||
1478 (type
!= 1 && type
!= 9)) {
1479 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1481 if (!(e2
& DESC_P_MASK
)) {
1482 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1484 #ifdef TARGET_X86_64
1485 if (env
->hflags
& HF_LMA_MASK
) {
1488 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1489 e4
= cpu_ldl_kernel_ra(env
, ptr
+ 12, GETPC());
1490 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1491 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1493 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1494 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1498 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1500 e2
|= DESC_TSS_BUSY_MASK
;
1501 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1503 env
->tr
.selector
= selector
;
1506 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1507 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1516 cpl
= env
->hflags
& HF_CPL_MASK
;
1517 if ((selector
& 0xfffc) == 0) {
1518 /* null selector case */
1520 #ifdef TARGET_X86_64
1521 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1524 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1526 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1529 if (selector
& 0x4) {
1534 index
= selector
& ~7;
1535 if ((index
+ 7) > dt
->limit
) {
1536 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1538 ptr
= dt
->base
+ index
;
1539 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1540 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1542 if (!(e2
& DESC_S_MASK
)) {
1543 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1546 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1547 if (seg_reg
== R_SS
) {
1548 /* must be writable segment */
1549 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1550 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1552 if (rpl
!= cpl
|| dpl
!= cpl
) {
1553 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1556 /* must be readable segment */
1557 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1558 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1561 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1562 /* if not conforming code, test rights */
1563 if (dpl
< cpl
|| dpl
< rpl
) {
1564 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1569 if (!(e2
& DESC_P_MASK
)) {
1570 if (seg_reg
== R_SS
) {
1571 raise_exception_err_ra(env
, EXCP0C_STACK
, selector
& 0xfffc, GETPC());
1573 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1577 /* set the access bit if not already set */
1578 if (!(e2
& DESC_A_MASK
)) {
1580 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1583 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1584 get_seg_base(e1
, e2
),
1585 get_seg_limit(e1
, e2
),
1588 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1589 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1594 /* protected mode jump */
1595 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1596 target_ulong next_eip
)
1599 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1601 if ((new_cs
& 0xfffc) == 0) {
1602 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1604 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1605 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1607 cpl
= env
->hflags
& HF_CPL_MASK
;
1608 if (e2
& DESC_S_MASK
) {
1609 if (!(e2
& DESC_CS_MASK
)) {
1610 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1612 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1613 if (e2
& DESC_C_MASK
) {
1614 /* conforming code segment */
1616 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1619 /* non conforming code segment */
1622 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1625 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1628 if (!(e2
& DESC_P_MASK
)) {
1629 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1631 limit
= get_seg_limit(e1
, e2
);
1632 if (new_eip
> limit
&&
1633 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
)) {
1634 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1636 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1637 get_seg_base(e1
, e2
), limit
, e2
);
1640 /* jump to call or task gate */
1641 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1643 cpl
= env
->hflags
& HF_CPL_MASK
;
1644 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1646 case 1: /* 286 TSS */
1647 case 9: /* 386 TSS */
1648 case 5: /* task gate */
1649 if (dpl
< cpl
|| dpl
< rpl
) {
1650 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1652 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
, GETPC());
1654 case 4: /* 286 call gate */
1655 case 12: /* 386 call gate */
1656 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1657 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1659 if (!(e2
& DESC_P_MASK
)) {
1660 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1663 new_eip
= (e1
& 0xffff);
1665 new_eip
|= (e2
& 0xffff0000);
1667 if (load_segment_ra(env
, &e1
, &e2
, gate_cs
, GETPC()) != 0) {
1668 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1670 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1671 /* must be code segment */
1672 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1673 (DESC_S_MASK
| DESC_CS_MASK
))) {
1674 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1676 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1677 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1678 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1680 if (!(e2
& DESC_P_MASK
)) {
1681 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1683 limit
= get_seg_limit(e1
, e2
);
1684 if (new_eip
> limit
) {
1685 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1687 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1688 get_seg_base(e1
, e2
), limit
, e2
);
1692 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1698 /* real mode call */
1699 void helper_lcall_real(CPUX86State
*env
, int new_cs
, target_ulong new_eip1
,
1700 int shift
, int next_eip
)
1703 uint32_t esp
, esp_mask
;
1707 esp
= env
->regs
[R_ESP
];
1708 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1709 ssp
= env
->segs
[R_SS
].base
;
1711 PUSHL_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1712 PUSHL_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1714 PUSHW_RA(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1715 PUSHW_RA(ssp
, esp
, esp_mask
, next_eip
, GETPC());
1718 SET_ESP(esp
, esp_mask
);
1720 env
->segs
[R_CS
].selector
= new_cs
;
1721 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1724 /* protected mode call */
1725 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1726 int shift
, target_ulong next_eip
)
1729 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
1730 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
1731 uint32_t val
, limit
, old_sp_mask
;
1732 target_ulong ssp
, old_ssp
;
1734 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
1735 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
1736 if ((new_cs
& 0xfffc) == 0) {
1737 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1739 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1740 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1742 cpl
= env
->hflags
& HF_CPL_MASK
;
1743 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1744 if (e2
& DESC_S_MASK
) {
1745 if (!(e2
& DESC_CS_MASK
)) {
1746 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1748 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1749 if (e2
& DESC_C_MASK
) {
1750 /* conforming code segment */
1752 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1755 /* non conforming code segment */
1758 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1761 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1764 if (!(e2
& DESC_P_MASK
)) {
1765 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1768 #ifdef TARGET_X86_64
1769 /* XXX: check 16/32 bit cases in long mode */
1774 rsp
= env
->regs
[R_ESP
];
1775 PUSHQ_RA(rsp
, env
->segs
[R_CS
].selector
, GETPC());
1776 PUSHQ_RA(rsp
, next_eip
, GETPC());
1777 /* from this point, not restartable */
1778 env
->regs
[R_ESP
] = rsp
;
1779 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1780 get_seg_base(e1
, e2
),
1781 get_seg_limit(e1
, e2
), e2
);
1786 sp
= env
->regs
[R_ESP
];
1787 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1788 ssp
= env
->segs
[R_SS
].base
;
1790 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1791 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1793 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1794 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1797 limit
= get_seg_limit(e1
, e2
);
1798 if (new_eip
> limit
) {
1799 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1801 /* from this point, not restartable */
1802 SET_ESP(sp
, sp_mask
);
1803 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1804 get_seg_base(e1
, e2
), limit
, e2
);
1808 /* check gate type */
1809 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1810 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1813 case 1: /* available 286 TSS */
1814 case 9: /* available 386 TSS */
1815 case 5: /* task gate */
1816 if (dpl
< cpl
|| dpl
< rpl
) {
1817 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1819 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
, GETPC());
1821 case 4: /* 286 call gate */
1822 case 12: /* 386 call gate */
1825 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1830 if (dpl
< cpl
|| dpl
< rpl
) {
1831 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1833 /* check valid bit */
1834 if (!(e2
& DESC_P_MASK
)) {
1835 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1837 selector
= e1
>> 16;
1838 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1839 param_count
= e2
& 0x1f;
1840 if ((selector
& 0xfffc) == 0) {
1841 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1844 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
1845 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1847 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1848 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1850 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1852 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1854 if (!(e2
& DESC_P_MASK
)) {
1855 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1858 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1859 /* to inner privilege */
1860 get_ss_esp_from_tss(env
, &ss
, &sp
, dpl
, GETPC());
1861 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1862 TARGET_FMT_lx
"\n", ss
, sp
, param_count
,
1864 if ((ss
& 0xfffc) == 0) {
1865 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1867 if ((ss
& 3) != dpl
) {
1868 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1870 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, ss
, GETPC()) != 0) {
1871 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1873 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1874 if (ss_dpl
!= dpl
) {
1875 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1877 if (!(ss_e2
& DESC_S_MASK
) ||
1878 (ss_e2
& DESC_CS_MASK
) ||
1879 !(ss_e2
& DESC_W_MASK
)) {
1880 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1882 if (!(ss_e2
& DESC_P_MASK
)) {
1883 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1886 /* push_size = ((param_count * 2) + 8) << shift; */
1888 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1889 old_ssp
= env
->segs
[R_SS
].base
;
1891 sp_mask
= get_sp_mask(ss_e2
);
1892 ssp
= get_seg_base(ss_e1
, ss_e2
);
1894 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1895 PUSHL_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1896 for (i
= param_count
- 1; i
>= 0; i
--) {
1897 val
= cpu_ldl_kernel_ra(env
, old_ssp
+
1898 ((env
->regs
[R_ESP
] + i
* 4) &
1899 old_sp_mask
), GETPC());
1900 PUSHL_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1903 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
, GETPC());
1904 PUSHW_RA(ssp
, sp
, sp_mask
, env
->regs
[R_ESP
], GETPC());
1905 for (i
= param_count
- 1; i
>= 0; i
--) {
1906 val
= cpu_lduw_kernel_ra(env
, old_ssp
+
1907 ((env
->regs
[R_ESP
] + i
* 2) &
1908 old_sp_mask
), GETPC());
1909 PUSHW_RA(ssp
, sp
, sp_mask
, val
, GETPC());
1914 /* to same privilege */
1915 sp
= env
->regs
[R_ESP
];
1916 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1917 ssp
= env
->segs
[R_SS
].base
;
1918 /* push_size = (4 << shift); */
1923 PUSHL_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1924 PUSHL_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1926 PUSHW_RA(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
, GETPC());
1927 PUSHW_RA(ssp
, sp
, sp_mask
, next_eip
, GETPC());
1930 /* from this point, not restartable */
1933 ss
= (ss
& ~3) | dpl
;
1934 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1936 get_seg_limit(ss_e1
, ss_e2
),
1940 selector
= (selector
& ~3) | dpl
;
1941 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1942 get_seg_base(e1
, e2
),
1943 get_seg_limit(e1
, e2
),
1945 SET_ESP(sp
, sp_mask
);
1950 /* real and vm86 mode iret */
1951 void helper_iret_real(CPUX86State
*env
, int shift
)
1953 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
1957 sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1958 sp
= env
->regs
[R_ESP
];
1959 ssp
= env
->segs
[R_SS
].base
;
1962 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
1963 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
1965 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
1968 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, GETPC());
1969 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, GETPC());
1970 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, GETPC());
1972 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~sp_mask
) | (sp
& sp_mask
);
1973 env
->segs
[R_CS
].selector
= new_cs
;
1974 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1976 if (env
->eflags
& VM_MASK
) {
1977 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
1980 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
1984 eflags_mask
&= 0xffff;
1986 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
1987 env
->hflags2
&= ~HF2_NMI_MASK
;
1990 static inline void validate_seg(CPUX86State
*env
, int seg_reg
, int cpl
)
1995 /* XXX: on x86_64, we do not want to nullify FS and GS because
1996 they may still contain a valid base. I would be interested to
1997 know how a real x86_64 CPU behaves */
1998 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
1999 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0) {
2003 e2
= env
->segs
[seg_reg
].flags
;
2004 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2005 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2006 /* data or non conforming code segment */
2008 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2013 /* protected mode iret */
2014 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
2015 int is_iret
, int addend
,
2018 uint32_t new_cs
, new_eflags
, new_ss
;
2019 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2020 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2021 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2022 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2024 #ifdef TARGET_X86_64
2030 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2032 sp
= env
->regs
[R_ESP
];
2033 ssp
= env
->segs
[R_SS
].base
;
2034 new_eflags
= 0; /* avoid warning */
2035 #ifdef TARGET_X86_64
2037 POPQ_RA(sp
, new_eip
, retaddr
);
2038 POPQ_RA(sp
, new_cs
, retaddr
);
2041 POPQ_RA(sp
, new_eflags
, retaddr
);
2048 POPL_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2049 POPL_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2052 POPL_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2053 if (new_eflags
& VM_MASK
) {
2054 goto return_to_vm86
;
2059 POPW_RA(ssp
, sp
, sp_mask
, new_eip
, retaddr
);
2060 POPW_RA(ssp
, sp
, sp_mask
, new_cs
, retaddr
);
2062 POPW_RA(ssp
, sp
, sp_mask
, new_eflags
, retaddr
);
2066 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2067 new_cs
, new_eip
, shift
, addend
);
2068 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env
)));
2069 if ((new_cs
& 0xfffc) == 0) {
2070 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2072 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, retaddr
) != 0) {
2073 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2075 if (!(e2
& DESC_S_MASK
) ||
2076 !(e2
& DESC_CS_MASK
)) {
2077 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2079 cpl
= env
->hflags
& HF_CPL_MASK
;
2082 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2084 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2085 if (e2
& DESC_C_MASK
) {
2087 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2091 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2094 if (!(e2
& DESC_P_MASK
)) {
2095 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, retaddr
);
2099 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2100 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2101 /* return to same privilege level */
2102 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2103 get_seg_base(e1
, e2
),
2104 get_seg_limit(e1
, e2
),
2107 /* return to different privilege level */
2108 #ifdef TARGET_X86_64
2110 POPQ_RA(sp
, new_esp
, retaddr
);
2111 POPQ_RA(sp
, new_ss
, retaddr
);
2118 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2119 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2123 POPW_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2124 POPW_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2127 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2129 if ((new_ss
& 0xfffc) == 0) {
2130 #ifdef TARGET_X86_64
2131 /* NULL ss is allowed in long mode if cpl != 3 */
2132 /* XXX: test CS64? */
2133 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2134 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2136 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2137 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2138 DESC_W_MASK
| DESC_A_MASK
);
2139 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2143 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2146 if ((new_ss
& 3) != rpl
) {
2147 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2149 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, new_ss
, retaddr
) != 0) {
2150 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2152 if (!(ss_e2
& DESC_S_MASK
) ||
2153 (ss_e2
& DESC_CS_MASK
) ||
2154 !(ss_e2
& DESC_W_MASK
)) {
2155 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2157 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2159 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2161 if (!(ss_e2
& DESC_P_MASK
)) {
2162 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc, retaddr
);
2164 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2165 get_seg_base(ss_e1
, ss_e2
),
2166 get_seg_limit(ss_e1
, ss_e2
),
2170 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2171 get_seg_base(e1
, e2
),
2172 get_seg_limit(e1
, e2
),
2175 #ifdef TARGET_X86_64
2176 if (env
->hflags
& HF_CS64_MASK
) {
2181 sp_mask
= get_sp_mask(ss_e2
);
2184 /* validate data segments */
2185 validate_seg(env
, R_ES
, rpl
);
2186 validate_seg(env
, R_DS
, rpl
);
2187 validate_seg(env
, R_FS
, rpl
);
2188 validate_seg(env
, R_GS
, rpl
);
2192 SET_ESP(sp
, sp_mask
);
2195 /* NOTE: 'cpl' is the _old_ CPL */
2196 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2198 eflags_mask
|= IOPL_MASK
;
2200 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2202 eflags_mask
|= IF_MASK
;
2205 eflags_mask
&= 0xffff;
2207 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2212 POPL_RA(ssp
, sp
, sp_mask
, new_esp
, retaddr
);
2213 POPL_RA(ssp
, sp
, sp_mask
, new_ss
, retaddr
);
2214 POPL_RA(ssp
, sp
, sp_mask
, new_es
, retaddr
);
2215 POPL_RA(ssp
, sp
, sp_mask
, new_ds
, retaddr
);
2216 POPL_RA(ssp
, sp
, sp_mask
, new_fs
, retaddr
);
2217 POPL_RA(ssp
, sp
, sp_mask
, new_gs
, retaddr
);
2219 /* modify processor state */
2220 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2221 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2223 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2224 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2225 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2226 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2227 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2228 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2230 env
->eip
= new_eip
& 0xffff;
2231 env
->regs
[R_ESP
] = new_esp
;
2234 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2236 int tss_selector
, type
;
2239 /* specific case for TSS */
2240 if (env
->eflags
& NT_MASK
) {
2241 #ifdef TARGET_X86_64
2242 if (env
->hflags
& HF_LMA_MASK
) {
2243 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2246 tss_selector
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0, GETPC());
2247 if (tss_selector
& 4) {
2248 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2250 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, GETPC()) != 0) {
2251 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2253 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2254 /* NOTE: we check both segment and busy TSS */
2256 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2258 switch_tss_ra(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
, GETPC());
2260 helper_ret_protected(env
, shift
, 1, 0, GETPC());
2262 env
->hflags2
&= ~HF2_NMI_MASK
;
2265 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2267 helper_ret_protected(env
, shift
, 0, addend
, GETPC());
2270 void helper_sysenter(CPUX86State
*env
)
2272 if (env
->sysenter_cs
== 0) {
2273 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2275 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2277 #ifdef TARGET_X86_64
2278 if (env
->hflags
& HF_LMA_MASK
) {
2279 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2281 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2283 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2288 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2290 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2292 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2294 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2296 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2298 DESC_W_MASK
| DESC_A_MASK
);
2299 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2300 env
->eip
= env
->sysenter_eip
;
2303 void helper_sysexit(CPUX86State
*env
, int dflag
)
2307 cpl
= env
->hflags
& HF_CPL_MASK
;
2308 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2309 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2311 #ifdef TARGET_X86_64
2313 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2315 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2316 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2317 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2319 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2321 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2322 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2323 DESC_W_MASK
| DESC_A_MASK
);
2327 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2329 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2330 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2331 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2332 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2334 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2335 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2336 DESC_W_MASK
| DESC_A_MASK
);
2338 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2339 env
->eip
= env
->regs
[R_EDX
];
2342 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2345 uint32_t e1
, e2
, eflags
, selector
;
2346 int rpl
, dpl
, cpl
, type
;
2348 selector
= selector1
& 0xffff;
2349 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2350 if ((selector
& 0xfffc) == 0) {
2353 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2357 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2358 cpl
= env
->hflags
& HF_CPL_MASK
;
2359 if (e2
& DESC_S_MASK
) {
2360 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2363 if (dpl
< cpl
|| dpl
< rpl
) {
2368 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2379 if (dpl
< cpl
|| dpl
< rpl
) {
2381 CC_SRC
= eflags
& ~CC_Z
;
2385 limit
= get_seg_limit(e1
, e2
);
2386 CC_SRC
= eflags
| CC_Z
;
2390 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2392 uint32_t e1
, e2
, eflags
, selector
;
2393 int rpl
, dpl
, cpl
, type
;
2395 selector
= selector1
& 0xffff;
2396 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2397 if ((selector
& 0xfffc) == 0) {
2400 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2404 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2405 cpl
= env
->hflags
& HF_CPL_MASK
;
2406 if (e2
& DESC_S_MASK
) {
2407 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2410 if (dpl
< cpl
|| dpl
< rpl
) {
2415 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2429 if (dpl
< cpl
|| dpl
< rpl
) {
2431 CC_SRC
= eflags
& ~CC_Z
;
2435 CC_SRC
= eflags
| CC_Z
;
2436 return e2
& 0x00f0ff00;
2439 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2441 uint32_t e1
, e2
, eflags
, selector
;
2444 selector
= selector1
& 0xffff;
2445 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2446 if ((selector
& 0xfffc) == 0) {
2449 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2452 if (!(e2
& DESC_S_MASK
)) {
2456 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2457 cpl
= env
->hflags
& HF_CPL_MASK
;
2458 if (e2
& DESC_CS_MASK
) {
2459 if (!(e2
& DESC_R_MASK
)) {
2462 if (!(e2
& DESC_C_MASK
)) {
2463 if (dpl
< cpl
|| dpl
< rpl
) {
2468 if (dpl
< cpl
|| dpl
< rpl
) {
2470 CC_SRC
= eflags
& ~CC_Z
;
2474 CC_SRC
= eflags
| CC_Z
;
2477 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2479 uint32_t e1
, e2
, eflags
, selector
;
2482 selector
= selector1
& 0xffff;
2483 eflags
= cpu_cc_compute_all(env
, CC_OP
);
2484 if ((selector
& 0xfffc) == 0) {
2487 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2490 if (!(e2
& DESC_S_MASK
)) {
2494 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2495 cpl
= env
->hflags
& HF_CPL_MASK
;
2496 if (e2
& DESC_CS_MASK
) {
2499 if (dpl
< cpl
|| dpl
< rpl
) {
2502 if (!(e2
& DESC_W_MASK
)) {
2504 CC_SRC
= eflags
& ~CC_Z
;
2508 CC_SRC
= eflags
| CC_Z
;
2511 #if defined(CONFIG_USER_ONLY)
2512 void cpu_x86_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
2514 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
)) {
2515 int dpl
= (env
->eflags
& VM_MASK
) ? 3 : 0;
2517 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2518 (selector
<< 4), 0xffff,
2519 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
2520 DESC_A_MASK
| (dpl
<< DESC_DPL_SHIFT
));
2522 helper_load_seg(env
, seg_reg
, selector
);
2527 /* check if Port I/O is allowed in TSS */
2528 static inline void check_io(CPUX86State
*env
, int addr
, int size
,
2531 int io_offset
, val
, mask
;
2533 /* TSS must be a valid 32 bit one */
2534 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
2535 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
2536 env
->tr
.limit
< 103) {
2539 io_offset
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0x66, retaddr
);
2540 io_offset
+= (addr
>> 3);
2541 /* Note: the check needs two bytes */
2542 if ((io_offset
+ 1) > env
->tr
.limit
) {
2545 val
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ io_offset
, retaddr
);
2547 mask
= (1 << size
) - 1;
2548 /* all bits must be zero to allow the I/O */
2549 if ((val
& mask
) != 0) {
2551 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2555 void helper_check_iob(CPUX86State
*env
, uint32_t t0
)
2557 check_io(env
, t0
, 1, GETPC());
2560 void helper_check_iow(CPUX86State
*env
, uint32_t t0
)
2562 check_io(env
, t0
, 2, GETPC());
2565 void helper_check_iol(CPUX86State
*env
, uint32_t t0
)
2567 check_io(env
, t0
, 4, GETPC());