qemu.git: target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/log.h"
28 //#define DEBUG_PCALL
30 #ifdef DEBUG_PCALL
31 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
32 # define LOG_PCALL_STATE(cpu) \
33 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
34 #else
35 # define LOG_PCALL(...) do { } while (0)
36 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #endif
39 #ifdef CONFIG_USER_ONLY
40 #define MEMSUFFIX _kernel
41 #define DATA_SIZE 1
42 #include "exec/cpu_ldst_useronly_template.h"
44 #define DATA_SIZE 2
45 #include "exec/cpu_ldst_useronly_template.h"
47 #define DATA_SIZE 4
48 #include "exec/cpu_ldst_useronly_template.h"
50 #define DATA_SIZE 8
51 #include "exec/cpu_ldst_useronly_template.h"
52 #undef MEMSUFFIX
53 #else
54 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
55 #define MEMSUFFIX _kernel
56 #define DATA_SIZE 1
57 #include "exec/cpu_ldst_template.h"
59 #define DATA_SIZE 2
60 #include "exec/cpu_ldst_template.h"
62 #define DATA_SIZE 4
63 #include "exec/cpu_ldst_template.h"
65 #define DATA_SIZE 8
66 #include "exec/cpu_ldst_template.h"
67 #undef CPU_MMU_INDEX
68 #undef MEMSUFFIX
69 #endif
71 /* return non-zero on error */
72 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
73 uint32_t *e2_ptr, int selector,
74 uintptr_t retaddr)
76 SegmentCache *dt;
77 int index;
78 target_ulong ptr;
80 if (selector & 0x4) {
81 dt = &env->ldt;
82 } else {
83 dt = &env->gdt;
85 index = selector & ~7;
86 if ((index + 7) > dt->limit) {
87 return -1;
89 ptr = dt->base + index;
90 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
91 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
92 return 0;
95 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
96 uint32_t *e2_ptr, int selector)
98 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
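/* e1 is the low 32 bits of a descriptor, e2 the high 32 bits:
   limit[15:0] and base[15:0] live in e1; base[23:16], the access byte,
   limit[19:16], the flags nibble and base[31:24] live in e2.  For the
   classic flat descriptor e1=0x0000ffff, e2=0x00cf9a00 this decodes to
   base=0 and, with the G bit set, limit=0xffffffff. */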
101 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103 unsigned int limit;
105 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
106 if (e2 & DESC_G_MASK) {
107 limit = (limit << 12) | 0xfff;
109 return limit;
112 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
114 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
117 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
118 uint32_t e2)
120 sc->base = get_seg_base(e1, e2);
121 sc->limit = get_seg_limit(e1, e2);
122 sc->flags = e2;
125 /* init the segment cache in vm86 mode. */
126 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
128 selector &= 0xffff;
130 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
131 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
132 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
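/* Fetch the inner-level stack pointer for privilege level 'dpl' from the
   current TSS: a 32 bit TSS keeps {ESP0,SS0} at offset 4 with an 8-byte
   stride, a 16 bit TSS keeps {SP0,SS0} at offset 2 with a 4-byte stride,
   which is what index = (dpl * 4 + 2) << shift computes. */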
135 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
136 uint32_t *esp_ptr, int dpl,
137 uintptr_t retaddr)
139 X86CPU *cpu = x86_env_get_cpu(env);
140 int type, index, shift;
142 #if 0
144 int i;
145 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
146 for (i = 0; i < env->tr.limit; i++) {
147 printf("%02x ", env->tr.base[i]);
148 if ((i & 7) == 7) {
149 printf("\n");
152 printf("\n");
154 #endif
156 if (!(env->tr.flags & DESC_P_MASK)) {
157 cpu_abort(CPU(cpu), "invalid tss");
159 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
160 if ((type & 7) != 1) {
161 cpu_abort(CPU(cpu), "invalid tss type");
163 shift = type >> 3;
164 index = (dpl * 4 + 2) << shift;
165 if (index + (4 << shift) - 1 > env->tr.limit) {
166 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
168 if (shift == 0) {
169 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
170 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
171 } else {
172 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
173 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
177 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
178 uintptr_t retaddr)
180 uint32_t e1, e2;
181 int rpl, dpl;
183 if ((selector & 0xfffc) != 0) {
184 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
185 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
187 if (!(e2 & DESC_S_MASK)) {
188 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
190 rpl = selector & 3;
191 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
192 if (seg_reg == R_CS) {
193 if (!(e2 & DESC_CS_MASK)) {
194 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
196 if (dpl != rpl) {
197 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
199 } else if (seg_reg == R_SS) {
200 /* SS must be writable data */
201 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
202 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
204 if (dpl != cpl || dpl != rpl) {
205 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
207 } else {
208 /* not readable code */
209 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
210 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
212             /* if data or non-conforming code, check the access rights */
213 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
214 if (dpl < cpl || dpl < rpl) {
215 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
219 if (!(e2 & DESC_P_MASK)) {
220 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
222 cpu_x86_load_seg_cache(env, seg_reg, selector,
223 get_seg_base(e1, e2),
224 get_seg_limit(e1, e2),
225 e2);
226 } else {
227 if (seg_reg == R_SS || seg_reg == R_CS) {
228 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
233 #define SWITCH_TSS_JMP 0
234 #define SWITCH_TSS_IRET 1
235 #define SWITCH_TSS_CALL 2
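/* Task switch summary: JMP and IRET clear the busy bit of the old TSS
   descriptor, JMP and CALL set it in the new one; a CALL additionally
   stores the old TSS selector as the back link of the new TSS and sets
   NT in the incoming EFLAGS, while IRET clears NT in the saved EFLAGS. */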
237 /* XXX: restore CPU state in registers (PowerPC case) */
238 static void switch_tss_ra(CPUX86State *env, int tss_selector,
239 uint32_t e1, uint32_t e2, int source,
240 uint32_t next_eip, uintptr_t retaddr)
242 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
243 target_ulong tss_base;
244 uint32_t new_regs[8], new_segs[6];
245 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
246 uint32_t old_eflags, eflags_mask;
247 SegmentCache *dt;
248 int index;
249 target_ulong ptr;
251 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
252 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
253 source);
255 /* if task gate, we read the TSS segment and we load it */
256 if (type == 5) {
257 if (!(e2 & DESC_P_MASK)) {
258 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
260 tss_selector = e1 >> 16;
261 if (tss_selector & 4) {
262 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
264 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
265 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
267 if (e2 & DESC_S_MASK) {
268 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
270 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
271 if ((type & 7) != 1) {
272 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
276 if (!(e2 & DESC_P_MASK)) {
277 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
280 if (type & 8) {
281 tss_limit_max = 103;
282 } else {
283 tss_limit_max = 43;
285 tss_limit = get_seg_limit(e1, e2);
286 tss_base = get_seg_base(e1, e2);
287 if ((tss_selector & 4) != 0 ||
288 tss_limit < tss_limit_max) {
289 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
291 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
292 if (old_type & 8) {
293 old_tss_limit_max = 103;
294 } else {
295 old_tss_limit_max = 43;
298 /* read all the registers from the new TSS */
299 if (type & 8) {
300 /* 32 bit */
301 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
302 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
303 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
304 for (i = 0; i < 8; i++) {
305 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
306 retaddr);
308 for (i = 0; i < 6; i++) {
309 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
310 retaddr);
312 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
313 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
314 } else {
315 /* 16 bit */
316 new_cr3 = 0;
317 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
318 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
319 for (i = 0; i < 8; i++) {
320 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
321 retaddr) | 0xffff0000;
323 for (i = 0; i < 4; i++) {
324 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
325 retaddr);
327 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
328 new_segs[R_FS] = 0;
329 new_segs[R_GS] = 0;
330 new_trap = 0;
332 /* XXX: avoid a compiler warning, see
333 http://support.amd.com/us/Processor_TechDocs/24593.pdf
334 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
335 (void)new_trap;
337     /* NOTE: we must avoid memory exceptions during the task switch,
338        so we make dummy accesses beforehand */
339     /* XXX: it can still fail in some cases, so a bigger hack is
340        necessary to validate the TLB after having done the accesses */
342 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
343 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
344 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
347 /* clear busy bit (it is restartable) */
348 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
349 target_ulong ptr;
350 uint32_t e2;
352 ptr = env->gdt.base + (env->tr.selector & ~7);
353 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
354 e2 &= ~DESC_TSS_BUSY_MASK;
355 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
357 old_eflags = cpu_compute_eflags(env);
358 if (source == SWITCH_TSS_IRET) {
359 old_eflags &= ~NT_MASK;
362 /* save the current state in the old TSS */
363 if (type & 8) {
364 /* 32 bit */
365 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
375 for (i = 0; i < 6; i++) {
376 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
377 env->segs[i].selector, retaddr);
379 } else {
380 /* 16 bit */
381 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
391 for (i = 0; i < 4; i++) {
392 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
393 env->segs[i].selector, retaddr);
397     /* now if an exception occurs, it will occur in the next task
398 context */
400 if (source == SWITCH_TSS_CALL) {
401 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
402 new_eflags |= NT_MASK;
405 /* set busy bit */
406 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
407 target_ulong ptr;
408 uint32_t e2;
410 ptr = env->gdt.base + (tss_selector & ~7);
411 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
412 e2 |= DESC_TSS_BUSY_MASK;
413 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
416 /* set the new CPU state */
417 /* from this point, any exception which occurs can give problems */
418 env->cr[0] |= CR0_TS_MASK;
419 env->hflags |= HF_TS_MASK;
420 env->tr.selector = tss_selector;
421 env->tr.base = tss_base;
422 env->tr.limit = tss_limit;
423 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
426 cpu_x86_update_cr3(env, new_cr3);
429 /* load all registers without an exception, then reload them with
430 possible exception */
431 env->eip = new_eip;
432 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
433 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
434 if (!(type & 8)) {
435 eflags_mask &= 0xffff;
437 cpu_load_eflags(env, new_eflags, eflags_mask);
438 /* XXX: what to do in 16 bit case? */
439 env->regs[R_EAX] = new_regs[0];
440 env->regs[R_ECX] = new_regs[1];
441 env->regs[R_EDX] = new_regs[2];
442 env->regs[R_EBX] = new_regs[3];
443 env->regs[R_ESP] = new_regs[4];
444 env->regs[R_EBP] = new_regs[5];
445 env->regs[R_ESI] = new_regs[6];
446 env->regs[R_EDI] = new_regs[7];
447 if (new_eflags & VM_MASK) {
448 for (i = 0; i < 6; i++) {
449 load_seg_vm(env, i, new_segs[i]);
451 } else {
452         /* load just the selectors first, as the rest may trigger exceptions */
453 for (i = 0; i < 6; i++) {
454 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
458 env->ldt.selector = new_ldt & ~4;
459 env->ldt.base = 0;
460 env->ldt.limit = 0;
461 env->ldt.flags = 0;
463 /* load the LDT */
464 if (new_ldt & 4) {
465 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
468 if ((new_ldt & 0xfffc) != 0) {
469 dt = &env->gdt;
470 index = new_ldt & ~7;
471 if ((index + 7) > dt->limit) {
472 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
474 ptr = dt->base + index;
475 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
476 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
477 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
478 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
480 if (!(e2 & DESC_P_MASK)) {
481 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
483 load_seg_cache_raw_dt(&env->ldt, e1, e2);
486 /* load the segments */
487 if (!(new_eflags & VM_MASK)) {
488 int cpl = new_segs[R_CS] & 3;
489 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
490 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
491 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
492 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
493 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
494 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
497 /* check that env->eip is in the CS segment limits */
498 if (new_eip > env->segs[R_CS].limit) {
499 /* XXX: different exception if CALL? */
500 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
503 #ifndef CONFIG_USER_ONLY
504 /* reset local breakpoints */
505 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
506 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
508 #endif
511 static void switch_tss(CPUX86State *env, int tss_selector,
512 uint32_t e1, uint32_t e2, int source,
513 uint32_t next_eip)
515 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
518 static inline unsigned int get_sp_mask(unsigned int e2)
520 if (e2 & DESC_B_MASK) {
521 return 0xffffffff;
522 } else {
523 return 0xffff;
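/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17). */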
527 static int exception_has_error_code(int intno)
529 switch (intno) {
530 case 8:
531 case 10:
532 case 11:
533 case 12:
534 case 13:
535 case 14:
536 case 17:
537 return 1;
539 return 0;
542 #ifdef TARGET_X86_64
543 #define SET_ESP(val, sp_mask) \
544 do { \
545 if ((sp_mask) == 0xffff) { \
546 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
547 ((val) & 0xffff); \
548 } else if ((sp_mask) == 0xffffffffLL) { \
549 env->regs[R_ESP] = (uint32_t)(val); \
550 } else { \
551 env->regs[R_ESP] = (val); \
553 } while (0)
554 #else
555 #define SET_ESP(val, sp_mask) \
556 do { \
557 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
558 ((val) & (sp_mask)); \
559 } while (0)
560 #endif
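/* SET_ESP writes back a stack pointer according to the stack size: with a
   16 bit stack only SP is updated (e.g. val=0x12345678, mask=0xffff stores
   0x5678 into SP and leaves the upper bits alone), a 32 bit stack
   zero-extends the value and a 64 bit stack takes it in full. */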
562 /* on 64-bit machines this can overflow, so this segment addition macro
563  * can be used to trim the value to 32 bits whenever needed */
564 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
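/* e.g. ssp=0xffff0000, sp=0x00018000, sp_mask=0xffffffff: the full sum is
   0x100008000, which SEG_ADDL truncates to 0x00008000 so the access stays
   within 32-bit address arithmetic. */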
566 /* XXX: add a is_user flag to have proper security support */
567 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
569 sp -= 2; \
570 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
573 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
575 sp -= 4; \
576 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
579 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
581 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
582 sp += 2; \
585 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
587 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
588 sp += 4; \
591 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
592 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
593 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
594 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
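/* The *_RA variants forward a host return address ('ra', usually GETPC()
   or a saved retaddr) to the cpu_*_kernel_ra accessors so that a faulting
   access can be unwound to the guest instruction; the plain wrappers pass
   0 when no unwinding information is needed. */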
596 /* protected mode interrupt */
597 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
598 int error_code, unsigned int next_eip,
599 int is_hw)
601 SegmentCache *dt;
602 target_ulong ptr, ssp;
603 int type, dpl, selector, ss_dpl, cpl;
604 int has_error_code, new_stack, shift;
605 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
606 uint32_t old_eip, sp_mask;
607 int vm86 = env->eflags & VM_MASK;
609 has_error_code = 0;
610 if (!is_int && !is_hw) {
611 has_error_code = exception_has_error_code(intno);
613 if (is_int) {
614 old_eip = next_eip;
615 } else {
616 old_eip = env->eip;
619 dt = &env->idt;
620 if (intno * 8 + 7 > dt->limit) {
621 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
623 ptr = dt->base + intno * 8;
624 e1 = cpu_ldl_kernel(env, ptr);
625 e2 = cpu_ldl_kernel(env, ptr + 4);
626 /* check gate type */
627 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
628 switch (type) {
629 case 5: /* task gate */
630 /* must do that check here to return the correct error code */
631 if (!(e2 & DESC_P_MASK)) {
632 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
634 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
635 if (has_error_code) {
636 int type;
637 uint32_t mask;
639 /* push the error code */
640 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
641 shift = type >> 3;
642 if (env->segs[R_SS].flags & DESC_B_MASK) {
643 mask = 0xffffffff;
644 } else {
645 mask = 0xffff;
647 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
648 ssp = env->segs[R_SS].base + esp;
649 if (shift) {
650 cpu_stl_kernel(env, ssp, error_code);
651 } else {
652 cpu_stw_kernel(env, ssp, error_code);
654 SET_ESP(esp, mask);
656 return;
657 case 6: /* 286 interrupt gate */
658 case 7: /* 286 trap gate */
659 case 14: /* 386 interrupt gate */
660 case 15: /* 386 trap gate */
661 break;
662 default:
663 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
664 break;
666 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
667 cpl = env->hflags & HF_CPL_MASK;
668 /* check privilege if software int */
669 if (is_int && dpl < cpl) {
670 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
672 /* check valid bit */
673 if (!(e2 & DESC_P_MASK)) {
674 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
676 selector = e1 >> 16;
677 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
678 if ((selector & 0xfffc) == 0) {
679 raise_exception_err(env, EXCP0D_GPF, 0);
681 if (load_segment(env, &e1, &e2, selector) != 0) {
682 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
684 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
685 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
688 if (dpl > cpl) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 if (!(e2 & DESC_P_MASK)) {
692 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
694 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
695 /* to inner privilege */
696 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
697 if ((ss & 0xfffc) == 0) {
698 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
700 if ((ss & 3) != dpl) {
701 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
703 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
704 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
706 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
707 if (ss_dpl != dpl) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 if (!(ss_e2 & DESC_S_MASK) ||
711 (ss_e2 & DESC_CS_MASK) ||
712 !(ss_e2 & DESC_W_MASK)) {
713 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
715 if (!(ss_e2 & DESC_P_MASK)) {
716 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
718 new_stack = 1;
719 sp_mask = get_sp_mask(ss_e2);
720 ssp = get_seg_base(ss_e1, ss_e2);
721 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
722 /* to same privilege */
723 if (vm86) {
724 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
726 new_stack = 0;
727 sp_mask = get_sp_mask(env->segs[R_SS].flags);
728 ssp = env->segs[R_SS].base;
729 esp = env->regs[R_ESP];
730 dpl = cpl;
731 } else {
732 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
733 new_stack = 0; /* avoid warning */
734 sp_mask = 0; /* avoid warning */
735 ssp = 0; /* avoid warning */
736 esp = 0; /* avoid warning */
739 shift = type >> 3;
741 #if 0
742 /* XXX: check that enough room is available */
743 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
744 if (vm86) {
745 push_size += 8;
747 push_size <<= shift;
748 #endif
749 if (shift == 1) {
750 if (new_stack) {
751 if (vm86) {
752 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
753 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
758 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
760 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
761 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
762 PUSHL(ssp, esp, sp_mask, old_eip);
763 if (has_error_code) {
764 PUSHL(ssp, esp, sp_mask, error_code);
766 } else {
767 if (new_stack) {
768 if (vm86) {
769 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
770 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
775 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
777 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
778 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
779 PUSHW(ssp, esp, sp_mask, old_eip);
780 if (has_error_code) {
781 PUSHW(ssp, esp, sp_mask, error_code);
785     /* interrupt gates clear the IF mask */
786 if ((type & 1) == 0) {
787 env->eflags &= ~IF_MASK;
789 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
791 if (new_stack) {
792 if (vm86) {
793 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
798 ss = (ss & ~3) | dpl;
799 cpu_x86_load_seg_cache(env, R_SS, ss,
800 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
802 SET_ESP(esp, sp_mask);
804 selector = (selector & ~3) | dpl;
805 cpu_x86_load_seg_cache(env, R_CS, selector,
806 get_seg_base(e1, e2),
807 get_seg_limit(e1, e2),
808 e2);
809 env->eip = offset;
812 #ifdef TARGET_X86_64
814 #define PUSHQ_RA(sp, val, ra) \
816 sp -= 8; \
817 cpu_stq_kernel_ra(env, sp, (val), ra); \
820 #define POPQ_RA(sp, val, ra) \
822 val = cpu_ldq_kernel_ra(env, sp, ra); \
823 sp += 8; \
826 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
827 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
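/* The 64 bit TSS stores RSP0-RSP2 at offsets 4/12/20 and IST1-IST7
   starting at offset 36, all as 8-byte values; hence index = 8 * level + 4
   below, with callers passing ist + 3 to select an IST slot. */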
829 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
831 X86CPU *cpu = x86_env_get_cpu(env);
832 int index;
834 #if 0
835 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
836 env->tr.base, env->tr.limit);
837 #endif
839 if (!(env->tr.flags & DESC_P_MASK)) {
840 cpu_abort(CPU(cpu), "invalid tss");
842 index = 8 * level + 4;
843 if ((index + 7) > env->tr.limit) {
844 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
846 return cpu_ldq_kernel(env, env->tr.base + index);
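/* In long mode IDT entries are 16 bytes, so the gate is read as three
   words e1/e2/e3 and the handler offset spans e1[15:0], e2[31:16] and e3.
   A non-zero IST field forces a stack switch even without a privilege
   change, and SS:RSP, RFLAGS, CS:RIP (plus any error code) are always
   pushed as 8-byte values after aligning RSP down to 16 bytes. */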
849 /* 64 bit interrupt */
850 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
851 int error_code, target_ulong next_eip, int is_hw)
853 SegmentCache *dt;
854 target_ulong ptr;
855 int type, dpl, selector, cpl, ist;
856 int has_error_code, new_stack;
857 uint32_t e1, e2, e3, ss;
858 target_ulong old_eip, esp, offset;
860 has_error_code = 0;
861 if (!is_int && !is_hw) {
862 has_error_code = exception_has_error_code(intno);
864 if (is_int) {
865 old_eip = next_eip;
866 } else {
867 old_eip = env->eip;
870 dt = &env->idt;
871 if (intno * 16 + 15 > dt->limit) {
872 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
874 ptr = dt->base + intno * 16;
875 e1 = cpu_ldl_kernel(env, ptr);
876 e2 = cpu_ldl_kernel(env, ptr + 4);
877 e3 = cpu_ldl_kernel(env, ptr + 8);
878 /* check gate type */
879 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
880 switch (type) {
881 case 14: /* 386 interrupt gate */
882 case 15: /* 386 trap gate */
883 break;
884 default:
885 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
886 break;
888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
889 cpl = env->hflags & HF_CPL_MASK;
890 /* check privilege if software int */
891 if (is_int && dpl < cpl) {
892 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
894 /* check valid bit */
895 if (!(e2 & DESC_P_MASK)) {
896 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
898 selector = e1 >> 16;
899 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
900 ist = e2 & 7;
901 if ((selector & 0xfffc) == 0) {
902 raise_exception_err(env, EXCP0D_GPF, 0);
905 if (load_segment(env, &e1, &e2, selector) != 0) {
906 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
909 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 if (!(e2 & DESC_P_MASK)) {
916 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
918 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
919 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
921 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
922 /* to inner privilege */
923 new_stack = 1;
924 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
925 ss = 0;
926 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
927 /* to same privilege */
928 if (env->eflags & VM_MASK) {
929 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 esp = env->regs[R_ESP];
933 dpl = cpl;
934 } else {
935 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
936 new_stack = 0; /* avoid warning */
937 esp = 0; /* avoid warning */
939 esp &= ~0xfLL; /* align stack */
941 PUSHQ(esp, env->segs[R_SS].selector);
942 PUSHQ(esp, env->regs[R_ESP]);
943 PUSHQ(esp, cpu_compute_eflags(env));
944 PUSHQ(esp, env->segs[R_CS].selector);
945 PUSHQ(esp, old_eip);
946 if (has_error_code) {
947 PUSHQ(esp, error_code);
950     /* interrupt gates clear the IF mask */
951 if ((type & 1) == 0) {
952 env->eflags &= ~IF_MASK;
954 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
956 if (new_stack) {
957 ss = 0 | dpl;
958 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
960 env->regs[R_ESP] = esp;
962 selector = (selector & ~3) | dpl;
963 cpu_x86_load_seg_cache(env, R_CS, selector,
964 get_seg_base(e1, e2),
965 get_seg_limit(e1, e2),
966 e2);
967 env->eip = offset;
969 #endif
971 #ifdef TARGET_X86_64
972 #if defined(CONFIG_USER_ONLY)
973 void helper_syscall(CPUX86State *env, int next_eip_addend)
975 CPUState *cs = CPU(x86_env_get_cpu(env));
977 cs->exception_index = EXCP_SYSCALL;
978 env->exception_next_eip = env->eip + next_eip_addend;
979 cpu_loop_exit(cs);
981 #else
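/* SYSCALL: in long mode RCX gets the return RIP and R11 the current
   RFLAGS, RFLAGS is masked with MSR_FMASK, CS is loaded from STAR[47:32]
   (SS = CS + 8) and RIP from LSTAR (64 bit caller) or CSTAR (compat
   caller); in legacy mode ECX gets the return EIP and EIP comes from
   STAR[31:0]. */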
982 void helper_syscall(CPUX86State *env, int next_eip_addend)
984 int selector;
986 if (!(env->efer & MSR_EFER_SCE)) {
987 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
989 selector = (env->star >> 32) & 0xffff;
990 if (env->hflags & HF_LMA_MASK) {
991 int code64;
993 env->regs[R_ECX] = env->eip + next_eip_addend;
994 env->regs[11] = cpu_compute_eflags(env);
996 code64 = env->hflags & HF_CS64_MASK;
998 env->eflags &= ~env->fmask;
999 cpu_load_eflags(env, env->eflags, 0);
1000 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1001 0, 0xffffffff,
1002 DESC_G_MASK | DESC_P_MASK |
1003 DESC_S_MASK |
1004 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1005 DESC_L_MASK);
1006 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1007 0, 0xffffffff,
1008 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1009 DESC_S_MASK |
1010 DESC_W_MASK | DESC_A_MASK);
1011 if (code64) {
1012 env->eip = env->lstar;
1013 } else {
1014 env->eip = env->cstar;
1016 } else {
1017 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1019 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1020 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1021 0, 0xffffffff,
1022 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1023 DESC_S_MASK |
1024 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1025 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1026 0, 0xffffffff,
1027 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1028 DESC_S_MASK |
1029 DESC_W_MASK | DESC_A_MASK);
1030 env->eip = (uint32_t)env->star;
1033 #endif
1034 #endif
1036 #ifdef TARGET_X86_64
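/* SYSRET: CS comes from STAR[63:48], +16 for a 64 bit return (dflag == 2),
   SS from STAR[63:48] + 8, both forced to RPL 3; in long mode RFLAGS is
   restored from R11, in legacy mode only IF is set. */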
1037 void helper_sysret(CPUX86State *env, int dflag)
1039 int cpl, selector;
1041 if (!(env->efer & MSR_EFER_SCE)) {
1042 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1044 cpl = env->hflags & HF_CPL_MASK;
1045 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1046 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1048 selector = (env->star >> 48) & 0xffff;
1049 if (env->hflags & HF_LMA_MASK) {
1050 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1051 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1052 NT_MASK);
1053 if (dflag == 2) {
1054 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1055 0, 0xffffffff,
1056 DESC_G_MASK | DESC_P_MASK |
1057 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1058 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1059 DESC_L_MASK);
1060 env->eip = env->regs[R_ECX];
1061 } else {
1062 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1063 0, 0xffffffff,
1064 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1065 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1067 env->eip = (uint32_t)env->regs[R_ECX];
1069 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1070 0, 0xffffffff,
1071 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1072 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1073 DESC_W_MASK | DESC_A_MASK);
1074 } else {
1075 env->eflags |= IF_MASK;
1076 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1077 0, 0xffffffff,
1078 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1079 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1080 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1081 env->eip = (uint32_t)env->regs[R_ECX];
1082 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_W_MASK | DESC_A_MASK);
1089 #endif
1091 /* real mode interrupt */
1092 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1093 int error_code, unsigned int next_eip)
1095 SegmentCache *dt;
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eip;
1101 /* real mode (simpler!) */
1102 dt = &env->idt;
1103 if (intno * 4 + 3 > dt->limit) {
1104 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1106 ptr = dt->base + intno * 4;
1107 offset = cpu_lduw_kernel(env, ptr);
1108 selector = cpu_lduw_kernel(env, ptr + 2);
1109 esp = env->regs[R_ESP];
1110 ssp = env->segs[R_SS].base;
1111 if (is_int) {
1112 old_eip = next_eip;
1113 } else {
1114 old_eip = env->eip;
1116 old_cs = env->segs[R_CS].selector;
1117 /* XXX: use SS segment size? */
1118 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1119 PUSHW(ssp, esp, 0xffff, old_cs);
1120 PUSHW(ssp, esp, 0xffff, old_eip);
1122 /* update processor state */
1123 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1124 env->eip = offset;
1125 env->segs[R_CS].selector = selector;
1126 env->segs[R_CS].base = (selector << 4);
1127 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1130 #if defined(CONFIG_USER_ONLY)
1131 /* fake user mode interrupt */
1132 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1133 int error_code, target_ulong next_eip)
1135 SegmentCache *dt;
1136 target_ulong ptr;
1137 int dpl, cpl, shift;
1138 uint32_t e2;
1140 dt = &env->idt;
1141 if (env->hflags & HF_LMA_MASK) {
1142 shift = 4;
1143 } else {
1144 shift = 3;
1146 ptr = dt->base + (intno << shift);
1147 e2 = cpu_ldl_kernel(env, ptr + 4);
1149 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1150 cpl = env->hflags & HF_CPL_MASK;
1151 /* check privilege if software int */
1152 if (is_int && dpl < cpl) {
1153 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1156 /* Since we emulate only user space, we cannot do more than
1157 exiting the emulation with the suitable exception and error
1158 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1159 if (is_int || intno == EXCP_SYSCALL) {
1160 env->eip = next_eip;
1164 #else
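/* When running under SVM, mirror the event being delivered into the VMCB
   EVENTINJ field (software interrupt vs. exception type, plus the error
   code outside real mode), unless an injection is already pending. */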
1166 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1167 int error_code, int is_hw, int rm)
1169 CPUState *cs = CPU(x86_env_get_cpu(env));
1170 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1171 control.event_inj));
1173 if (!(event_inj & SVM_EVTINJ_VALID)) {
1174 int type;
1176 if (is_int) {
1177 type = SVM_EVTINJ_TYPE_SOFT;
1178 } else {
1179 type = SVM_EVTINJ_TYPE_EXEPT;
1181 event_inj = intno | type | SVM_EVTINJ_VALID;
1182 if (!rm && exception_has_error_code(intno)) {
1183 event_inj |= SVM_EVTINJ_VALID_ERR;
1184 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1185 control.event_inj_err),
1186 error_code);
1188 x86_stl_phys(cs,
1189 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1190 event_inj);
1193 #endif
1196  * Begin execution of an interrupt. is_int is TRUE if coming from
1197 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1198 * instruction. It is only relevant if is_int is TRUE.
1200 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1201 int error_code, target_ulong next_eip, int is_hw)
1203 CPUX86State *env = &cpu->env;
1205 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1206 if ((env->cr[0] & CR0_PE_MASK)) {
1207 static int count;
1209 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1210 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1211 count, intno, error_code, is_int,
1212 env->hflags & HF_CPL_MASK,
1213 env->segs[R_CS].selector, env->eip,
1214 (int)env->segs[R_CS].base + env->eip,
1215 env->segs[R_SS].selector, env->regs[R_ESP]);
1216 if (intno == 0x0e) {
1217 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1218 } else {
1219 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1221 qemu_log("\n");
1222 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1223 #if 0
1225 int i;
1226 target_ulong ptr;
1228 qemu_log(" code=");
1229 ptr = env->segs[R_CS].base + env->eip;
1230 for (i = 0; i < 16; i++) {
1231 qemu_log(" %02x", ldub(ptr + i));
1233 qemu_log("\n");
1235 #endif
1236 count++;
1239 if (env->cr[0] & CR0_PE_MASK) {
1240 #if !defined(CONFIG_USER_ONLY)
1241 if (env->hflags & HF_SVMI_MASK) {
1242 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1244 #endif
1245 #ifdef TARGET_X86_64
1246 if (env->hflags & HF_LMA_MASK) {
1247 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1248 } else
1249 #endif
1251 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1252 is_hw);
1254 } else {
1255 #if !defined(CONFIG_USER_ONLY)
1256 if (env->hflags & HF_SVMI_MASK) {
1257 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1259 #endif
1260 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1263 #if !defined(CONFIG_USER_ONLY)
1264 if (env->hflags & HF_SVMI_MASK) {
1265 CPUState *cs = CPU(cpu);
1266 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1267 offsetof(struct vmcb,
1268 control.event_inj));
1270 x86_stl_phys(cs,
1271 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1272 event_inj & ~SVM_EVTINJ_VALID);
1274 #endif
1277 void x86_cpu_do_interrupt(CPUState *cs)
1279 X86CPU *cpu = X86_CPU(cs);
1280 CPUX86State *env = &cpu->env;
1282 #if defined(CONFIG_USER_ONLY)
1283 /* if user mode only, we simulate a fake exception
1284 which will be handled outside the cpu execution
1285 loop */
1286 do_interrupt_user(env, cs->exception_index,
1287 env->exception_is_int,
1288 env->error_code,
1289 env->exception_next_eip);
1290 /* successfully delivered */
1291 env->old_exception = -1;
1292 #else
1293 /* simulate a real cpu exception. On i386, it can
1294 trigger new exceptions, but we do not handle
1295 double or triple faults yet. */
1296 do_interrupt_all(cpu, cs->exception_index,
1297 env->exception_is_int,
1298 env->error_code,
1299 env->exception_next_eip, 0);
1300 /* successfully delivered */
1301 env->old_exception = -1;
1302 #endif
1305 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1307 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
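/* Service one pending interrupt request per call, in priority order:
   APIC poll, SIPI, then (with GIF set) SMI, NMI, MCE, external hardware
   interrupts and finally SVM virtual interrupts; the SMI, INTR and VINTR
   paths also run the corresponding SVM intercept checks. */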
1310 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1312 X86CPU *cpu = X86_CPU(cs);
1313 CPUX86State *env = &cpu->env;
1314 bool ret = false;
1316 #if !defined(CONFIG_USER_ONLY)
1317 if (interrupt_request & CPU_INTERRUPT_POLL) {
1318 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1319 apic_poll_irq(cpu->apic_state);
1320 /* Don't process multiple interrupt requests in a single call.
1321 This is required to make icount-driven execution deterministic. */
1322 return true;
1324 #endif
1325 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1326 do_cpu_sipi(cpu);
1327 } else if (env->hflags2 & HF2_GIF_MASK) {
1328 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1329 !(env->hflags & HF_SMM_MASK)) {
1330 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1331 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1332 do_smm_enter(cpu);
1333 ret = true;
1334 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1335 !(env->hflags2 & HF2_NMI_MASK)) {
1336 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1337 env->hflags2 |= HF2_NMI_MASK;
1338 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1339 ret = true;
1340 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1341 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1342 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1343 ret = true;
1344 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1345 (((env->hflags2 & HF2_VINTR_MASK) &&
1346 (env->hflags2 & HF2_HIF_MASK)) ||
1347 (!(env->hflags2 & HF2_VINTR_MASK) &&
1348 (env->eflags & IF_MASK &&
1349 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1350 int intno;
1351 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1352 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1353 CPU_INTERRUPT_VIRQ);
1354 intno = cpu_get_pic_interrupt(env);
1355 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1356 "Servicing hardware INT=0x%02x\n", intno);
1357 do_interrupt_x86_hardirq(env, intno, 1);
1358 /* ensure that no TB jump will be modified as
1359 the program flow was changed */
1360 ret = true;
1361 #if !defined(CONFIG_USER_ONLY)
1362 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1363 (env->eflags & IF_MASK) &&
1364 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1365 int intno;
1366 /* FIXME: this should respect TPR */
1367 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1368 intno = x86_ldl_phys(cs, env->vm_vmcb
1369 + offsetof(struct vmcb, control.int_vector));
1370 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1371 "Servicing virtual hardware INT=0x%02x\n", intno);
1372 do_interrupt_x86_hardirq(env, intno, 1);
1373 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1374 ret = true;
1375 #endif
1379 return ret;
1382 void helper_lldt(CPUX86State *env, int selector)
1384 SegmentCache *dt;
1385 uint32_t e1, e2;
1386 int index, entry_limit;
1387 target_ulong ptr;
1389 selector &= 0xffff;
1390 if ((selector & 0xfffc) == 0) {
1391 /* XXX: NULL selector case: invalid LDT */
1392 env->ldt.base = 0;
1393 env->ldt.limit = 0;
1394 } else {
1395 if (selector & 0x4) {
1396 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1398 dt = &env->gdt;
1399 index = selector & ~7;
1400 #ifdef TARGET_X86_64
1401 if (env->hflags & HF_LMA_MASK) {
1402 entry_limit = 15;
1403 } else
1404 #endif
1406 entry_limit = 7;
1408 if ((index + entry_limit) > dt->limit) {
1409 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1411 ptr = dt->base + index;
1412 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1413 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1414 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1415 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1417 if (!(e2 & DESC_P_MASK)) {
1418 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1420 #ifdef TARGET_X86_64
1421 if (env->hflags & HF_LMA_MASK) {
1422 uint32_t e3;
1424 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1425 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1426 env->ldt.base |= (target_ulong)e3 << 32;
1427 } else
1428 #endif
1430 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1433 env->ldt.selector = selector;
1436 void helper_ltr(CPUX86State *env, int selector)
1438 SegmentCache *dt;
1439 uint32_t e1, e2;
1440 int index, type, entry_limit;
1441 target_ulong ptr;
1443 selector &= 0xffff;
1444 if ((selector & 0xfffc) == 0) {
1445 /* NULL selector case: invalid TR */
1446 env->tr.base = 0;
1447 env->tr.limit = 0;
1448 env->tr.flags = 0;
1449 } else {
1450 if (selector & 0x4) {
1451 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1453 dt = &env->gdt;
1454 index = selector & ~7;
1455 #ifdef TARGET_X86_64
1456 if (env->hflags & HF_LMA_MASK) {
1457 entry_limit = 15;
1458 } else
1459 #endif
1461 entry_limit = 7;
1463 if ((index + entry_limit) > dt->limit) {
1464 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1466 ptr = dt->base + index;
1467 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1468 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1469 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1470 if ((e2 & DESC_S_MASK) ||
1471 (type != 1 && type != 9)) {
1472 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1474 if (!(e2 & DESC_P_MASK)) {
1475 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1477 #ifdef TARGET_X86_64
1478 if (env->hflags & HF_LMA_MASK) {
1479 uint32_t e3, e4;
1481 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1482 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1483 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1484 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1486 load_seg_cache_raw_dt(&env->tr, e1, e2);
1487 env->tr.base |= (target_ulong)e3 << 32;
1488 } else
1489 #endif
1491 load_seg_cache_raw_dt(&env->tr, e1, e2);
1493 e2 |= DESC_TSS_BUSY_MASK;
1494 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1496 env->tr.selector = selector;
1499 /* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
1500 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1502 uint32_t e1, e2;
1503 int cpl, dpl, rpl;
1504 SegmentCache *dt;
1505 int index;
1506 target_ulong ptr;
1508 selector &= 0xffff;
1509 cpl = env->hflags & HF_CPL_MASK;
1510 if ((selector & 0xfffc) == 0) {
1511 /* null selector case */
1512 if (seg_reg == R_SS
1513 #ifdef TARGET_X86_64
1514 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1515 #endif
1517 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1519 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1520 } else {
1522 if (selector & 0x4) {
1523 dt = &env->ldt;
1524 } else {
1525 dt = &env->gdt;
1527 index = selector & ~7;
1528 if ((index + 7) > dt->limit) {
1529 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1531 ptr = dt->base + index;
1532 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1533 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1535 if (!(e2 & DESC_S_MASK)) {
1536 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1538 rpl = selector & 3;
1539 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1540 if (seg_reg == R_SS) {
1541 /* must be writable segment */
1542 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1543 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1545 if (rpl != cpl || dpl != cpl) {
1546 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1548 } else {
1549 /* must be readable segment */
1550 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1551 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1554 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1555 /* if not conforming code, test rights */
1556 if (dpl < cpl || dpl < rpl) {
1557 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1562 if (!(e2 & DESC_P_MASK)) {
1563 if (seg_reg == R_SS) {
1564 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1565 } else {
1566 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1570 /* set the access bit if not already set */
1571 if (!(e2 & DESC_A_MASK)) {
1572 e2 |= DESC_A_MASK;
1573 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1576 cpu_x86_load_seg_cache(env, seg_reg, selector,
1577 get_seg_base(e1, e2),
1578 get_seg_limit(e1, e2),
1579 e2);
1580 #if 0
1581 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1582 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1583 #endif
1587 /* protected mode jump */
1588 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1589 target_ulong next_eip)
1591 int gate_cs, type;
1592 uint32_t e1, e2, cpl, dpl, rpl, limit;
1594 if ((new_cs & 0xfffc) == 0) {
1595 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1597 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1598 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1600 cpl = env->hflags & HF_CPL_MASK;
1601 if (e2 & DESC_S_MASK) {
1602 if (!(e2 & DESC_CS_MASK)) {
1603 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1605 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1606 if (e2 & DESC_C_MASK) {
1607 /* conforming code segment */
1608 if (dpl > cpl) {
1609 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1611 } else {
1612 /* non conforming code segment */
1613 rpl = new_cs & 3;
1614 if (rpl > cpl) {
1615 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1617 if (dpl != cpl) {
1618 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1621 if (!(e2 & DESC_P_MASK)) {
1622 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1624 limit = get_seg_limit(e1, e2);
1625 if (new_eip > limit &&
1626 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1627 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1629 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1630 get_seg_base(e1, e2), limit, e2);
1631 env->eip = new_eip;
1632 } else {
1633 /* jump to call or task gate */
1634 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1635 rpl = new_cs & 3;
1636 cpl = env->hflags & HF_CPL_MASK;
1637 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1638 switch (type) {
1639 case 1: /* 286 TSS */
1640 case 9: /* 386 TSS */
1641 case 5: /* task gate */
1642 if (dpl < cpl || dpl < rpl) {
1643 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1645 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1646 break;
1647 case 4: /* 286 call gate */
1648 case 12: /* 386 call gate */
1649 if ((dpl < cpl) || (dpl < rpl)) {
1650 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1652 if (!(e2 & DESC_P_MASK)) {
1653 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1655 gate_cs = e1 >> 16;
1656 new_eip = (e1 & 0xffff);
1657 if (type == 12) {
1658 new_eip |= (e2 & 0xffff0000);
1660 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1661 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1663 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1664 /* must be code segment */
1665 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1666 (DESC_S_MASK | DESC_CS_MASK))) {
1667 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1669 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1670 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1671 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1673 if (!(e2 & DESC_P_MASK)) {
1674 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1676 limit = get_seg_limit(e1, e2);
1677 if (new_eip > limit) {
1678 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1680 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1681 get_seg_base(e1, e2), limit, e2);
1682 env->eip = new_eip;
1683 break;
1684 default:
1685 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1686 break;
1691 /* real mode call */
1692 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1693 int shift, int next_eip)
1695 int new_eip;
1696 uint32_t esp, esp_mask;
1697 target_ulong ssp;
1699 new_eip = new_eip1;
1700 esp = env->regs[R_ESP];
1701 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1702 ssp = env->segs[R_SS].base;
1703 if (shift) {
1704 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1705 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1706 } else {
1707 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1708 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1711 SET_ESP(esp, esp_mask);
1712 env->eip = new_eip;
1713 env->segs[R_CS].selector = new_cs;
1714 env->segs[R_CS].base = (new_cs << 4);
1717 /* protected mode call */
1718 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1719 int shift, target_ulong next_eip)
1721 int new_stack, i;
1722 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1723 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1724 uint32_t val, limit, old_sp_mask;
1725 target_ulong ssp, old_ssp;
1727 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1728 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1729 if ((new_cs & 0xfffc) == 0) {
1730 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1732 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1733 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1735 cpl = env->hflags & HF_CPL_MASK;
1736 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1737 if (e2 & DESC_S_MASK) {
1738 if (!(e2 & DESC_CS_MASK)) {
1739 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1741 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1742 if (e2 & DESC_C_MASK) {
1743 /* conforming code segment */
1744 if (dpl > cpl) {
1745 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1747 } else {
1748 /* non conforming code segment */
1749 rpl = new_cs & 3;
1750 if (rpl > cpl) {
1751 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1753 if (dpl != cpl) {
1754 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1757 if (!(e2 & DESC_P_MASK)) {
1758 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1761 #ifdef TARGET_X86_64
1762 /* XXX: check 16/32 bit cases in long mode */
1763 if (shift == 2) {
1764 target_ulong rsp;
1766 /* 64 bit case */
1767 rsp = env->regs[R_ESP];
1768 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1769 PUSHQ_RA(rsp, next_eip, GETPC());
1770 /* from this point, not restartable */
1771 env->regs[R_ESP] = rsp;
1772 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1773 get_seg_base(e1, e2),
1774 get_seg_limit(e1, e2), e2);
1775 env->eip = new_eip;
1776 } else
1777 #endif
1779 sp = env->regs[R_ESP];
1780 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1781 ssp = env->segs[R_SS].base;
1782 if (shift) {
1783 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1784 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1785 } else {
1786 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1787 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1790 limit = get_seg_limit(e1, e2);
1791 if (new_eip > limit) {
1792 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1794 /* from this point, not restartable */
1795 SET_ESP(sp, sp_mask);
1796 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1797 get_seg_base(e1, e2), limit, e2);
1798 env->eip = new_eip;
1800 } else {
1801 /* check gate type */
1802 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1803 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1804 rpl = new_cs & 3;
1805 switch (type) {
1806 case 1: /* available 286 TSS */
1807 case 9: /* available 386 TSS */
1808 case 5: /* task gate */
1809 if (dpl < cpl || dpl < rpl) {
1810 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1812 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1813 return;
1814 case 4: /* 286 call gate */
1815 case 12: /* 386 call gate */
1816 break;
1817 default:
1818 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1819 break;
1821 shift = type >> 3;
1823 if (dpl < cpl || dpl < rpl) {
1824 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1826 /* check valid bit */
1827 if (!(e2 & DESC_P_MASK)) {
1828 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1830 selector = e1 >> 16;
1831 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1832 param_count = e2 & 0x1f;
1833 if ((selector & 0xfffc) == 0) {
1834 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1837 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1838 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1840 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1841 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1843 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1844 if (dpl > cpl) {
1845 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1847 if (!(e2 & DESC_P_MASK)) {
1848 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1851 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1852 /* to inner privilege */
1853 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1854 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1855 TARGET_FMT_lx "\n", ss, sp, param_count,
1856 env->regs[R_ESP]);
1857 if ((ss & 0xfffc) == 0) {
1858 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1860 if ((ss & 3) != dpl) {
1861 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1863 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1864 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1866 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1867 if (ss_dpl != dpl) {
1868 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1870 if (!(ss_e2 & DESC_S_MASK) ||
1871 (ss_e2 & DESC_CS_MASK) ||
1872 !(ss_e2 & DESC_W_MASK)) {
1873 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1875 if (!(ss_e2 & DESC_P_MASK)) {
1876 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1877 }
1879 /* push_size = ((param_count * 2) + 8) << shift; */
1881 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1882 old_ssp = env->segs[R_SS].base;
1884 sp_mask = get_sp_mask(ss_e2);
1885 ssp = get_seg_base(ss_e1, ss_e2);
1886 if (shift) {
1887 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1888 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1889 for (i = param_count - 1; i >= 0; i--) {
1890 val = cpu_ldl_kernel_ra(env, old_ssp +
1891 ((env->regs[R_ESP] + i * 4) &
1892 old_sp_mask), GETPC());
1893 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1894 }
1895 } else {
1896 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1897 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1898 for (i = param_count - 1; i >= 0; i--) {
1899 val = cpu_lduw_kernel_ra(env, old_ssp +
1900 ((env->regs[R_ESP] + i * 2) &
1901 old_sp_mask), GETPC());
1902 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1903 }
1904 }
1905 new_stack = 1;
1906 } else {
1907 /* to same privilege */
1908 sp = env->regs[R_ESP];
1909 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1910 ssp = env->segs[R_SS].base;
1911 /* push_size = (4 << shift); */
1912 new_stack = 0;
1913 }
1915 if (shift) {
1916 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1917 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1918 } else {
1919 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1920 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1921 }
1923 /* from this point, not restartable */
1925 if (new_stack) {
1926 ss = (ss & ~3) | dpl;
1927 cpu_x86_load_seg_cache(env, R_SS, ss,
1928 ssp,
1929 get_seg_limit(ss_e1, ss_e2),
1930 ss_e2);
1931 }
1933 selector = (selector & ~3) | dpl;
1934 cpu_x86_load_seg_cache(env, R_CS, selector,
1935 get_seg_base(e1, e2),
1936 get_seg_limit(e1, e2),
1937 e2);
1938 SET_ESP(sp, sp_mask);
1939 env->eip = offset;
1940 }
1941 }
1943 /* real and vm86 mode iret */
1944 void helper_iret_real(CPUX86State *env, int shift)
1945 {
1946 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1947 target_ulong ssp;
1948 int eflags_mask;
1950 sp_mask = 0xffff; /* XXXX: use SS segment size? */
1951 sp = env->regs[R_ESP];
1952 ssp = env->segs[R_SS].base;
1953 if (shift == 1) {
1954 /* 32 bits */
1955 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1956 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
1957 new_cs &= 0xffff;
1958 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1959 } else {
1960 /* 16 bits */
1961 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1962 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1963 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
1964 }
1965 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1966 env->segs[R_CS].selector = new_cs;
1967 env->segs[R_CS].base = (new_cs << 4);
1968 env->eip = new_eip;
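/* In vm86 mode IOPL cannot be changed by iret; in real mode (CPL 0) it can. */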
1969 if (env->eflags & VM_MASK) {
1970 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1971 NT_MASK;
1972 } else {
1973 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1974 RF_MASK | NT_MASK;
1975 }
1976 if (shift == 0) {
1977 eflags_mask &= 0xffff;
1978 }
1979 cpu_load_eflags(env, new_eflags, eflags_mask);
1980 env->hflags2 &= ~HF2_NMI_MASK;
1981 }
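/* Nullify data segment registers that are no longer accessible at the
   new CPL; used by helper_ret_protected() when returning to an outer
   privilege level. */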
1983 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1984 {
1985 int dpl;
1986 uint32_t e2;
1988 /* XXX: on x86_64, we do not want to nullify FS and GS because
1989 they may still contain a valid base. I would be interested to
1990 know how a real x86_64 CPU behaves */
1991 if ((seg_reg == R_FS || seg_reg == R_GS) &&
1992 (env->segs[seg_reg].selector & 0xfffc) == 0) {
1993 return;
1994 }
1996 e2 = env->segs[seg_reg].flags;
1997 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1998 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1999 /* data or non conforming code segment */
2000 if (dpl < cpl) {
2001 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2002 }
2003 }
2004 }
2006 /* protected mode iret */
2007 static inline void helper_ret_protected(CPUX86State *env, int shift,
2008 int is_iret, int addend,
2009 uintptr_t retaddr)
2010 {
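/* shift selects the pop size: 0 = 16 bit, 1 = 32 bit, 2 = 64 bit. */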
2011 uint32_t new_cs, new_eflags, new_ss;
2012 uint32_t new_es, new_ds, new_fs, new_gs;
2013 uint32_t e1, e2, ss_e1, ss_e2;
2014 int cpl, dpl, rpl, eflags_mask, iopl;
2015 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2017 #ifdef TARGET_X86_64
2018 if (shift == 2) {
2019 sp_mask = -1;
2020 } else
2021 #endif
2022 {
2023 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2024 }
2025 sp = env->regs[R_ESP];
2026 ssp = env->segs[R_SS].base;
2027 new_eflags = 0; /* avoid warning */
2028 #ifdef TARGET_X86_64
2029 if (shift == 2) {
2030 POPQ_RA(sp, new_eip, retaddr);
2031 POPQ_RA(sp, new_cs, retaddr);
2032 new_cs &= 0xffff;
2033 if (is_iret) {
2034 POPQ_RA(sp, new_eflags, retaddr);
2035 }
2036 } else
2037 #endif
2038 {
2039 if (shift == 1) {
2040 /* 32 bits */
2041 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2042 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2043 new_cs &= 0xffff;
2044 if (is_iret) {
2045 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2046 if (new_eflags & VM_MASK) {
2047 goto return_to_vm86;
2048 }
2049 }
2050 } else {
2051 /* 16 bits */
2052 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2053 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2054 if (is_iret) {
2055 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2056 }
2057 }
2058 }
2059 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2060 new_cs, new_eip, shift, addend);
2061 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2062 if ((new_cs & 0xfffc) == 0) {
2063 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2064 }
2065 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2066 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2067 }
2068 if (!(e2 & DESC_S_MASK) ||
2069 !(e2 & DESC_CS_MASK)) {
2070 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2071 }
2072 cpl = env->hflags & HF_CPL_MASK;
2073 rpl = new_cs & 3;
2074 if (rpl < cpl) {
2075 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2076 }
2077 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2078 if (e2 & DESC_C_MASK) {
2079 if (dpl > rpl) {
2080 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2081 }
2082 } else {
2083 if (dpl != rpl) {
2084 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2085 }
2086 }
2087 if (!(e2 & DESC_P_MASK)) {
2088 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2089 }
2091 sp += addend;
2092 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2093 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2094 /* return to same privilege level */
2095 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2096 get_seg_base(e1, e2),
2097 get_seg_limit(e1, e2),
2098 e2);
2099 } else {
2100 /* return to different privilege level */
2101 #ifdef TARGET_X86_64
2102 if (shift == 2) {
2103 POPQ_RA(sp, new_esp, retaddr);
2104 POPQ_RA(sp, new_ss, retaddr);
2105 new_ss &= 0xffff;
2106 } else
2107 #endif
2108 {
2109 if (shift == 1) {
2110 /* 32 bits */
2111 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2112 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2113 new_ss &= 0xffff;
2114 } else {
2115 /* 16 bits */
2116 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2117 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2118 }
2119 }
2120 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2121 new_ss, new_esp);
2122 if ((new_ss & 0xfffc) == 0) {
2123 #ifdef TARGET_X86_64
2124 /* NULL ss is allowed in long mode if cpl != 3 */
2125 /* XXX: test CS64? */
2126 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2127 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2128 0, 0xffffffff,
2129 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2130 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2131 DESC_W_MASK | DESC_A_MASK);
2132 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2133 } else
2134 #endif
2135 {
2136 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2137 }
2138 } else {
2139 if ((new_ss & 3) != rpl) {
2140 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2141 }
2142 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2143 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2144 }
2145 if (!(ss_e2 & DESC_S_MASK) ||
2146 (ss_e2 & DESC_CS_MASK) ||
2147 !(ss_e2 & DESC_W_MASK)) {
2148 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2149 }
2150 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2151 if (dpl != rpl) {
2152 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2153 }
2154 if (!(ss_e2 & DESC_P_MASK)) {
2155 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2156 }
2157 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2158 get_seg_base(ss_e1, ss_e2),
2159 get_seg_limit(ss_e1, ss_e2),
2160 ss_e2);
2161 }
2163 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2164 get_seg_base(e1, e2),
2165 get_seg_limit(e1, e2),
2166 e2);
2167 sp = new_esp;
2168 #ifdef TARGET_X86_64
2169 if (env->hflags & HF_CS64_MASK) {
2170 sp_mask = -1;
2171 } else
2172 #endif
2173 {
2174 sp_mask = get_sp_mask(ss_e2);
2175 }
2177 /* validate data segments */
2178 validate_seg(env, R_ES, rpl);
2179 validate_seg(env, R_DS, rpl);
2180 validate_seg(env, R_FS, rpl);
2181 validate_seg(env, R_GS, rpl);
2183 sp += addend;
2184 }
2185 SET_ESP(sp, sp_mask);
2186 env->eip = new_eip;
2187 if (is_iret) {
2188 /* NOTE: 'cpl' is the _old_ CPL */
2189 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2190 if (cpl == 0) {
2191 eflags_mask |= IOPL_MASK;
2192 }
2193 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2194 if (cpl <= iopl) {
2195 eflags_mask |= IF_MASK;
2196 }
2197 if (shift == 0) {
2198 eflags_mask &= 0xffff;
2199 }
2200 cpu_load_eflags(env, new_eflags, eflags_mask);
2201 }
2202 return;
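/* iret back to vm86 mode: the 32-bit frame also carries ESP, SS, ES,
   DS, FS and GS. */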
2204 return_to_vm86:
2205 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2206 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2207 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2208 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2209 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2210 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2212 /* modify processor state */
2213 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2214 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2215 VIP_MASK);
2216 load_seg_vm(env, R_CS, new_cs & 0xffff);
2217 load_seg_vm(env, R_SS, new_ss & 0xffff);
2218 load_seg_vm(env, R_ES, new_es & 0xffff);
2219 load_seg_vm(env, R_DS, new_ds & 0xffff);
2220 load_seg_vm(env, R_FS, new_fs & 0xffff);
2221 load_seg_vm(env, R_GS, new_gs & 0xffff);
2223 env->eip = new_eip & 0xffff;
2224 env->regs[R_ESP] = new_esp;
2225 }
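/* Protected mode iret: either a task return via the back-link TSS when
   NT is set, or a normal return handled by helper_ret_protected(). */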
2227 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2228 {
2229 int tss_selector, type;
2230 uint32_t e1, e2;
2232 /* specific case for TSS */
2233 if (env->eflags & NT_MASK) {
2234 #ifdef TARGET_X86_64
2235 if (env->hflags & HF_LMA_MASK) {
2236 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2237 }
2238 #endif
2239 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2240 if (tss_selector & 4) {
2241 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2242 }
2243 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2244 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2245 }
2246 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2247 /* NOTE: we check both segment and busy TSS */
2248 if (type != 3) {
2249 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2250 }
2251 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2252 } else {
2253 helper_ret_protected(env, shift, 1, 0, GETPC());
2254 }
2255 env->hflags2 &= ~HF2_NMI_MASK;
2256 }
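/* Far return (lret); addend extra bytes are released from the stack,
   as for "lret imm16". */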
2258 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2259 {
2260 helper_ret_protected(env, shift, 0, addend, GETPC());
2261 }
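/* SYSENTER: enter CPL 0 with flat CS/SS derived from the SYSENTER_CS
   MSR; ESP and EIP come from SYSENTER_ESP/SYSENTER_EIP. */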
2263 void helper_sysenter(CPUX86State *env)
2264 {
2265 if (env->sysenter_cs == 0) {
2266 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2267 }
2268 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2270 #ifdef TARGET_X86_64
2271 if (env->hflags & HF_LMA_MASK) {
2272 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2273 0, 0xffffffff,
2274 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2275 DESC_S_MASK |
2276 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2277 DESC_L_MASK);
2278 } else
2279 #endif
2280 {
2281 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2282 0, 0xffffffff,
2283 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2284 DESC_S_MASK |
2285 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2286 }
2287 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2288 0, 0xffffffff,
2289 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2290 DESC_S_MASK |
2291 DESC_W_MASK | DESC_A_MASK);
2292 env->regs[R_ESP] = env->sysenter_esp;
2293 env->eip = env->sysenter_eip;
2294 }
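/* SYSEXIT: return to CPL 3 with CS/SS derived from SYSENTER_CS
   (+16/+24, or +32/+40 for a 64-bit return); ESP from ECX, EIP from EDX. */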
2296 void helper_sysexit(CPUX86State *env, int dflag)
2297 {
2298 int cpl;
2300 cpl = env->hflags & HF_CPL_MASK;
2301 if (env->sysenter_cs == 0 || cpl != 0) {
2302 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2303 }
2304 #ifdef TARGET_X86_64
2305 if (dflag == 2) {
2306 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2307 3, 0, 0xffffffff,
2308 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2309 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2310 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2311 DESC_L_MASK);
2312 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2313 3, 0, 0xffffffff,
2314 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2315 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2316 DESC_W_MASK | DESC_A_MASK);
2317 } else
2318 #endif
2319 {
2320 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2321 3, 0, 0xffffffff,
2322 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2323 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2325 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2326 3, 0, 0xffffffff,
2327 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2328 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2329 DESC_W_MASK | DESC_A_MASK);
2330 }
2331 env->regs[R_ESP] = env->regs[R_ECX];
2332 env->eip = env->regs[R_EDX];
2333 }
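/* LSL: return the target segment limit and report success in ZF. */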
2335 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2336 {
2337 unsigned int limit;
2338 uint32_t e1, e2, eflags, selector;
2339 int rpl, dpl, cpl, type;
2341 selector = selector1 & 0xffff;
2342 eflags = cpu_cc_compute_all(env, CC_OP);
2343 if ((selector & 0xfffc) == 0) {
2344 goto fail;
2345 }
2346 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2347 goto fail;
2348 }
2349 rpl = selector & 3;
2350 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2351 cpl = env->hflags & HF_CPL_MASK;
2352 if (e2 & DESC_S_MASK) {
2353 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2354 /* conforming */
2355 } else {
2356 if (dpl < cpl || dpl < rpl) {
2357 goto fail;
2358 }
2359 }
2360 } else {
2361 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2362 switch (type) {
2363 case 1:
2364 case 2:
2365 case 3:
2366 case 9:
2367 case 11:
2368 break;
2369 default:
2370 goto fail;
2371 }
2372 if (dpl < cpl || dpl < rpl) {
2373 fail:
2374 CC_SRC = eflags & ~CC_Z;
2375 return 0;
2376 }
2377 }
2378 limit = get_seg_limit(e1, e2);
2379 CC_SRC = eflags | CC_Z;
2380 return limit;
2381 }
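/* LAR: return the descriptor access-rights bytes (masked with
   0x00f0ff00) and report success in ZF. */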
2383 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2384 {
2385 uint32_t e1, e2, eflags, selector;
2386 int rpl, dpl, cpl, type;
2388 selector = selector1 & 0xffff;
2389 eflags = cpu_cc_compute_all(env, CC_OP);
2390 if ((selector & 0xfffc) == 0) {
2391 goto fail;
2392 }
2393 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2394 goto fail;
2395 }
2396 rpl = selector & 3;
2397 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2398 cpl = env->hflags & HF_CPL_MASK;
2399 if (e2 & DESC_S_MASK) {
2400 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2401 /* conforming */
2402 } else {
2403 if (dpl < cpl || dpl < rpl) {
2404 goto fail;
2405 }
2406 }
2407 } else {
2408 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2409 switch (type) {
2410 case 1:
2411 case 2:
2412 case 3:
2413 case 4:
2414 case 5:
2415 case 9:
2416 case 11:
2417 case 12:
2418 break;
2419 default:
2420 goto fail;
2421 }
2422 if (dpl < cpl || dpl < rpl) {
2423 fail:
2424 CC_SRC = eflags & ~CC_Z;
2425 return 0;
2426 }
2427 }
2428 CC_SRC = eflags | CC_Z;
2429 return e2 & 0x00f0ff00;
2430 }
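/* VERR: set ZF if the selector is readable at the current CPL/RPL. */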
2432 void helper_verr(CPUX86State *env, target_ulong selector1)
2433 {
2434 uint32_t e1, e2, eflags, selector;
2435 int rpl, dpl, cpl;
2437 selector = selector1 & 0xffff;
2438 eflags = cpu_cc_compute_all(env, CC_OP);
2439 if ((selector & 0xfffc) == 0) {
2440 goto fail;
2441 }
2442 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2443 goto fail;
2444 }
2445 if (!(e2 & DESC_S_MASK)) {
2446 goto fail;
2447 }
2448 rpl = selector & 3;
2449 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2450 cpl = env->hflags & HF_CPL_MASK;
2451 if (e2 & DESC_CS_MASK) {
2452 if (!(e2 & DESC_R_MASK)) {
2453 goto fail;
2454 }
2455 if (!(e2 & DESC_C_MASK)) {
2456 if (dpl < cpl || dpl < rpl) {
2457 goto fail;
2458 }
2459 }
2460 } else {
2461 if (dpl < cpl || dpl < rpl) {
2462 fail:
2463 CC_SRC = eflags & ~CC_Z;
2464 return;
2465 }
2466 }
2467 CC_SRC = eflags | CC_Z;
2468 }
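/* VERW: set ZF if the selector is writable at the current CPL/RPL. */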
2470 void helper_verw(CPUX86State *env, target_ulong selector1)
2471 {
2472 uint32_t e1, e2, eflags, selector;
2473 int rpl, dpl, cpl;
2475 selector = selector1 & 0xffff;
2476 eflags = cpu_cc_compute_all(env, CC_OP);
2477 if ((selector & 0xfffc) == 0) {
2478 goto fail;
2479 }
2480 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2481 goto fail;
2482 }
2483 if (!(e2 & DESC_S_MASK)) {
2484 goto fail;
2485 }
2486 rpl = selector & 3;
2487 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2488 cpl = env->hflags & HF_CPL_MASK;
2489 if (e2 & DESC_CS_MASK) {
2490 goto fail;
2491 } else {
2492 if (dpl < cpl || dpl < rpl) {
2493 goto fail;
2494 }
2495 if (!(e2 & DESC_W_MASK)) {
2496 fail:
2497 CC_SRC = eflags & ~CC_Z;
2498 return;
2499 }
2500 }
2501 CC_SRC = eflags | CC_Z;
2502 }
2504 #if defined(CONFIG_USER_ONLY)
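/* user-mode emulation only: build the descriptor cache directly in real
   or vm86 mode, otherwise go through the protected mode segment load. */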
2505 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2506 {
2507 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2508 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2509 selector &= 0xffff;
2510 cpu_x86_load_seg_cache(env, seg_reg, selector,
2511 (selector << 4), 0xffff,
2512 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2513 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2514 } else {
2515 helper_load_seg(env, seg_reg, selector);
2516 }
2517 }
2518 #endif
2520 /* check if Port I/O is allowed in TSS */
2521 static inline void check_io(CPUX86State *env, int addr, int size,
2522 uintptr_t retaddr)
2523 {
2524 int io_offset, val, mask;
2526 /* TSS must be a valid 32 bit one */
2527 if (!(env->tr.flags & DESC_P_MASK) ||
2528 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2529 env->tr.limit < 103) {
2530 goto fail;
2531 }
2532 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2533 io_offset += (addr >> 3);
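/* Each port maps to one bit of the TSS I/O permission bitmap:
   addr >> 3 selects the byte, addr & 7 the bit within it. */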
2534 /* Note: the check needs two bytes */
2535 if ((io_offset + 1) > env->tr.limit) {
2536 goto fail;
2537 }
2538 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2539 val >>= (addr & 7);
2540 mask = (1 << size) - 1;
2541 /* all bits must be zero to allow the I/O */
2542 if ((val & mask) != 0) {
2543 fail:
2544 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2545 }
2546 }
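/* Wrappers checking 1-, 2- and 4-byte port accesses against the TSS
   I/O permission bitmap. */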
2548 void helper_check_iob(CPUX86State *env, uint32_t t0)
2549 {
2550 check_io(env, t0, 1, GETPC());
2551 }
2553 void helper_check_iow(CPUX86State *env, uint32_t t0)
2554 {
2555 check_io(env, t0, 2, GETPC());
2556 }
2558 void helper_check_iol(CPUX86State *env, uint32_t t0)
2559 {
2560 check_io(env, t0, 4, GETPC());
2561 }