/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

/*
 * TODO: Convert callers to compute cpu_mmu_index_kernel once
 * and use *_mmuidx_ra directly.
 */
#define cpu_ldub_kernel_ra(e, p, r) \
    cpu_ldub_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_lduw_kernel_ra(e, p, r) \
    cpu_lduw_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldl_kernel_ra(e, p, r) \
    cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
#define cpu_ldq_kernel_ra(e, p, r) \
    cpu_ldq_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)

#define cpu_stb_kernel_ra(e, p, v, r) \
    cpu_stb_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stw_kernel_ra(e, p, v, r) \
    cpu_stw_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stl_kernel_ra(e, p, v, r) \
    cpu_stl_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)
#define cpu_stq_kernel_ra(e, p, v, r) \
    cpu_stq_mmuidx_ra(e, p, v, cpu_mmu_index_kernel(e), r)

#define cpu_ldub_kernel(e, p)    cpu_ldub_kernel_ra(e, p, 0)
#define cpu_lduw_kernel(e, p)    cpu_lduw_kernel_ra(e, p, 0)
#define cpu_ldl_kernel(e, p)     cpu_ldl_kernel_ra(e, p, 0)
#define cpu_ldq_kernel(e, p)     cpu_ldq_kernel_ra(e, p, 0)

#define cpu_stb_kernel(e, p, v)  cpu_stb_kernel_ra(e, p, v, 0)
#define cpu_stw_kernel(e, p, v)  cpu_stw_kernel_ra(e, p, v, 0)
#define cpu_stl_kernel(e, p, v)  cpu_stl_kernel_ra(e, p, v, 0)
#define cpu_stq_kernel(e, p, v)  cpu_stq_kernel_ra(e, p, v, 0)
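
/*
 * Illustrative expansion (not generated code): cpu_ldl_kernel(env, ptr)
 * becomes cpu_ldl_mmuidx_ra(env, ptr, cpu_mmu_index_kernel(env), 0), so
 * every access recomputes the kernel MMU index; this is exactly what the
 * TODO above suggests hoisting into the callers.
 */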

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
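
/*
 * Worked example (values chosen purely for illustration): for the
 * descriptor words e1 = 0x1234ffff, e2 = 0x00cf9a56, get_seg_base()
 * reassembles the scattered base bytes into 0x00561234, and
 * get_seg_limit() sees a raw limit of 0xfffff with DESC_G_MASK set, so
 * it returns (0xfffff << 12) | 0xfff = 0xffffffff, a 4 GiB flat segment.
 */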

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
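
/*
 * Sketch of the lookup above (derived from the code, matching the
 * architectural TSS layouts): for a 32-bit TSS (shift == 1) the ring-N
 * stack pair lives at offset 8*N + 4, i.e. ESP0 at +0x04 and SS0 at
 * +0x08; for a 16-bit TSS (shift == 0) it is SP0 at +0x02 and SS0 at
 * +0x04, which is why index is computed as (dpl * 4 + 2) << shift.
 */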

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
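
/*
 * Offsets used by switch_tss_ra() below (they follow the architectural
 * TSS formats): in a 32-bit TSS, CR3 is at +0x1c, EIP at +0x20, EFLAGS
 * at +0x24, the eight GP registers at +0x28.., the six segment selectors
 * at +0x48.., the LDT selector at +0x60 and the trap/IO-map word at
 * +0x64; the 16-bit TSS packs the same state at +0x0e (IP), +0x10
 * (FLAGS), +0x12.. (registers), +0x22.. (four selectors) and +0x2a (LDT).
 */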

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
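
/*
 * Illustrative behaviour of SET_ESP (assumed values): with sp_mask ==
 * 0xffff only the low 16 bits of ESP are replaced, preserving the upper
 * bits as a 16-bit stack segment requires; with 0xffffffff the value is
 * truncated to 32 bits; and a zero mask (64-bit stack, see get_sp_mask)
 * writes the full RSP.
 */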

/* XXX: add an is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
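
/*
 * A note on the pattern (derived from the macros above): the PUSHx/POPx
 * macros update only the caller's local 'sp' copy; callers commit it
 * with SET_ESP once the whole frame has been written, so a fault raised
 * by cpu_st*_kernel_ra() mid-sequence leaves the architectural ESP
 * intact and the faulting instruction restartable.
 */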

/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
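
/*
 * For reference (summarising the code above): the frame pushed for a
 * protected-mode interrupt is, from higher to lower addresses,
 * [GS FS DS ES] (vm86 only), [SS ESP] (stack switch only), EFLAGS, CS,
 * EIP, [error code], each slot 2 or 4 bytes wide depending on gate size.
 */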

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
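
/*
 * Index sketch (derived from the callers): level 0..2 select RSP0..RSP2
 * at 64-bit TSS offsets 0x04/0x0c/0x14, and do_interrupt64() passes
 * ist + 3 so that IST1..IST7 map to offsets 0x24 and up.
 */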

/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_SYSCALL;
    env->exception_is_int = 0;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
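
/*
 * MSR_STAR layout, as the code above and helper_sysret() below use it:
 * bits 47:32 hold the SYSCALL CS selector (SS is that value + 8) and
 * bits 63:48 the SYSRET base selector; legacy-mode SYSCALL additionally
 * takes its target EIP from the low 32 bits of STAR.
 */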

#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
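
/*
 * Real-mode delivery (as implemented above): the IVT entry for vector n
 * is the 4-byte IP:CS pair at idt.base + n * 4, and the frame pushed on
 * the 16-bit stack is FLAGS, CS, IP.
 */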

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index >= EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
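
/*
 * Note (matching the entry_limit logic in helper_lldt() above and
 * helper_ltr() below): in long mode, LDT and TSS descriptors grow to
 * 16 bytes, with bits 63:32 of the base held in the third word, hence
 * an entry_limit of 15 instead of 7 and the extra load at ptr + 8.
 */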

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
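
/*
 * Aside (reflecting the MSR_EFER_LMA paths above): 64-bit call gates
 * occupy 16 bytes, so the code re-reads the descriptor at new_cs + 8
 * and folds e1 into bits 63:32 of the target RIP; the second half must
 * have a type field of 0 to be valid.
 */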

/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;

                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
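
/*
 * Note on the call-gate path above: for 16- and 32-bit gates,
 * param_count (bits 4:0 of e2) words are copied from the old stack to
 * the new inner-privilege stack before CS:EIP is pushed; 64-bit call
 * gates carry no parameters, so only the SS:RSP and CS:RIP frames are
 * written.
 */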
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
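/*
 * For illustration: the two masks above mean that an IRET in vm86 mode
 * can never change IOPL (the virtual-8086 monitor keeps control of it),
 * while a real-mode IRET can. A 16-bit IRET (shift == 0) pops only
 * FLAGS, so the & 0xffff step keeps the upper half of EFLAGS (AC, ID,
 * ...) untouched.
 */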
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
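/*
 * Rationale: when an inter-privilege return lowers the privilege level,
 * data segments that the outer code could not have loaded itself (DPL
 * below the new CPL) must not remain usable. The selector is zeroed and
 * the present bit is cleared in the cached descriptor, so any further
 * use faults, while base and limit are kept for the FS/GS case noted in
 * the XXX comment above.
 */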
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
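    /*
     * At this point the frame popped so far is, per operand size:
     * EIP, CS[, EFLAGS], with EFLAGS only present for IRET (is_iret).
     * For a far return with an immediate (lret $n), the 'addend'
     * argument later skips the n bytes of parameters that the matching
     * call gate pushed.
     */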
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }
    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;
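    /*
     * Example of the rules above: an IRET at CPL 0 may change IOPL; an
     * IRET at CPL 3 with IOPL 3 (cpl <= iopl) may toggle IF; an IRET at
     * CPL 3 with IOPL 0 silently leaves both IOPL and IF unchanged
     * rather than faulting.
     */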
 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
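/*
 * The vm86 return frame consumed above completes the 9-dword stack image
 * an IRET to virtual-8086 mode expects: EIP, CS and EFLAGS (popped
 * earlier, before the goto), then ESP, SS, ES, DS, FS and GS, i.e. 36
 * bytes in total on the CPL 0 stack.
 */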
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
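/*
 * On the 0x17 mask: it keeps the S bit (0x10 after the shift) and the
 * low three type bits while ignoring bit 3, which only distinguishes
 * 16-bit from 32-bit TSS descriptors. type == 3 therefore accepts
 * exactly a *busy* TSS of either size, matching the retained NOTE: a
 * nested-task IRET must return through the busy TSS named by the back
 * link at offset 0 of the current TSS.
 */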
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
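/*
 * SYSENTER derives everything from three MSRs: IA32_SYSENTER_CS gives a
 * flat ring-0 code selector, the matching stack selector is CS + 8, and
 * IA32_SYSENTER_ESP/EIP (env->sysenter_esp/eip) supply the entry stack
 * pointer and entry point. No descriptor-table lookup is performed; the
 * cached descriptors are synthesized as flat 4 GiB segments.
 */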
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
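/*
 * SYSEXIT pairs with the SYSENTER layout: starting from IA32_SYSENTER_CS,
 * the GDT is assumed to hold the ring-0 CS at +0, ring-0 SS at +8, the
 * ring-3 32-bit CS at +16 and SS at +24, and (for a 64-bit SYSEXIT,
 * dflag == 2) the ring-3 64-bit CS at +32 and SS at +40. The | 3 forces
 * RPL 3 on the selectors; ECX and EDX carry the user stack pointer and
 * return address.
 */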
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
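/*
 * LSL never faults on a bad selector; success is reported through ZF
 * (folded into CC_SRC here), and the translated instruction only writes
 * its destination on success. E.g. for a page-granular segment (G set)
 * with a raw limit field of 0xffff, get_seg_limit() returns
 * (0xffff << 12) | 0xfff = 0xfffffff, the byte limit LSL hands back.
 */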
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
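/*
 * The 0x00f0ff00 mask is the architected LAR result: bits 8-15 are the
 * access rights byte (type, S, DPL, P) and bits 20-23 the AVL/L/D/G flag
 * nibble of the descriptor's second word; everything else reads back as
 * zero.
 */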
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
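/*
 * VERR/VERW mirror the access checks a MOV to a segment register would
 * perform, but fold the outcome into ZF instead of faulting: ZF is set
 * when the selector is readable (VERR) or writable (VERW) at the current
 * CPL/RPL, and cleared otherwise. Conforming code segments skip the
 * privilege test, matching the hardware rules.
 */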
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, X86Seg seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
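/*
 * Worked example: an outw to port 0x3f9 reads the I/O permission bitmap
 * word at iobase + (0x3f9 >> 3) = iobase + 0x7f, shifts it right by
 * 0x3f9 & 7 = 1, and tests mask (1 << 2) - 1 = 3, i.e. the permission
 * bits for ports 0x3f9 and 0x3fa. Loading a 16-bit word is what lets an
 * access that straddles a byte boundary of the bitmap be checked in one
 * read.
 */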
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}