qemu.git / target-i386 / seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "qemu/log.h"
24 #include "exec/helper-proto.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/log.h"
28 //#define DEBUG_PCALL
30 #ifdef DEBUG_PCALL
31 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
32 # define LOG_PCALL_STATE(cpu) \
33 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
34 #else
35 # define LOG_PCALL(...) do { } while (0)
36 # define LOG_PCALL_STATE(cpu) do { } while (0)
37 #endif
39 #ifdef CONFIG_USER_ONLY
40 #define MEMSUFFIX _kernel
41 #define DATA_SIZE 1
42 #include "exec/cpu_ldst_useronly_template.h"
44 #define DATA_SIZE 2
45 #include "exec/cpu_ldst_useronly_template.h"
47 #define DATA_SIZE 4
48 #include "exec/cpu_ldst_useronly_template.h"
50 #define DATA_SIZE 8
51 #include "exec/cpu_ldst_useronly_template.h"
52 #undef MEMSUFFIX
53 #else
54 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
55 #define MEMSUFFIX _kernel
56 #define DATA_SIZE 1
57 #include "exec/cpu_ldst_template.h"
59 #define DATA_SIZE 2
60 #include "exec/cpu_ldst_template.h"
62 #define DATA_SIZE 4
63 #include "exec/cpu_ldst_template.h"
65 #define DATA_SIZE 8
66 #include "exec/cpu_ldst_template.h"
67 #undef CPU_MMU_INDEX
68 #undef MEMSUFFIX
69 #endif
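/*
 * Descriptor-table access helper: the cpu_ld*_kernel* accessors generated
 * by the templates above are used below to read GDT/LDT/IDT/TSS contents
 * with the kernel MMU index.  load_segment_ra() fetches the 8-byte
 * descriptor named by 'selector' as two 32-bit words: bit 2 of the
 * selector (the TI bit) picks the LDT or the GDT, the upper 13 bits index
 * into the table, and the access is rejected if it would run past the
 * table limit.
 */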
71 /* return non-zero on error */
72 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
73 uint32_t *e2_ptr, int selector,
74 uintptr_t retaddr)
76 SegmentCache *dt;
77 int index;
78 target_ulong ptr;
80 if (selector & 0x4) {
81 dt = &env->ldt;
82 } else {
83 dt = &env->gdt;
85 index = selector & ~7;
86 if ((index + 7) > dt->limit) {
87 return -1;
89 ptr = dt->base + index;
90 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
91 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
92 return 0;
95 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
96 uint32_t *e2_ptr, int selector)
98 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
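/*
 * A protected-mode segment descriptor packs its fields as follows: the
 * low words of the limit and of the base live in the first dword (e1);
 * the second dword (e2) holds base bits 16-23, the access rights and
 * type, limit bits 16-19, the flag bits (G/B/L/AVL) and base bits 24-31.
 * When the granularity bit is set, the 20-bit limit is in units of 4 KiB
 * pages, which is what the shift by 12 below implements.
 */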
101 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103 unsigned int limit;
105 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
106 if (e2 & DESC_G_MASK) {
107 limit = (limit << 12) | 0xfff;
109 return limit;
112 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
114 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
117 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
118 uint32_t e2)
120 sc->base = get_seg_base(e1, e2);
121 sc->limit = get_seg_limit(e1, e2);
122 sc->flags = e2;
125 /* init the segment cache in vm86 mode. */
126 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
128 selector &= 0xffff;
130 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
131 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
132 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
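/*
 * On a stack switch to a more privileged level, the new SS:ESP pair is
 * read from the current TSS.  A 32-bit TSS stores ESP0/SS0, ESP1/SS1 and
 * ESP2/SS2 as 8-byte pairs starting at offset 4, while a 16-bit TSS
 * stores SP/SS word pairs starting at offset 2; the 'shift' derived from
 * the TSS type selects between the two layouts.
 */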
135 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
136 uint32_t *esp_ptr, int dpl,
137 uintptr_t retaddr)
139 X86CPU *cpu = x86_env_get_cpu(env);
140 int type, index, shift;
142 #if 0
144 int i;
145 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
146 for (i = 0; i < env->tr.limit; i++) {
147 printf("%02x ", env->tr.base[i]);
148 if ((i & 7) == 7) {
149 printf("\n");
152 printf("\n");
154 #endif
156 if (!(env->tr.flags & DESC_P_MASK)) {
157 cpu_abort(CPU(cpu), "invalid tss");
159 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
160 if ((type & 7) != 1) {
161 cpu_abort(CPU(cpu), "invalid tss type");
163 shift = type >> 3;
164 index = (dpl * 4 + 2) << shift;
165 if (index + (4 << shift) - 1 > env->tr.limit) {
166 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
168 if (shift == 0) {
169 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
170 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
171 } else {
172 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
173 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
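/*
 * Validate a segment register loaded as part of a task switch: most
 * failures raise a TSS fault (#TS) carrying the offending selector,
 * while a not-present segment raises #NP; a null selector is rejected
 * only for CS and SS.
 */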
177 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
178 uintptr_t retaddr)
180 uint32_t e1, e2;
181 int rpl, dpl;
183 if ((selector & 0xfffc) != 0) {
184 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
185 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
187 if (!(e2 & DESC_S_MASK)) {
188 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
190 rpl = selector & 3;
191 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
192 if (seg_reg == R_CS) {
193 if (!(e2 & DESC_CS_MASK)) {
194 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
196 if (dpl != rpl) {
197 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
199 } else if (seg_reg == R_SS) {
200 /* SS must be writable data */
201 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
202 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
204 if (dpl != cpl || dpl != rpl) {
205 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
207 } else {
208 /* execute-only (non-readable) code segments are not allowed */
209 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
210 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
212 /* if data or non-conforming code, check the privilege rights */
213 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
214 if (dpl < cpl || dpl < rpl) {
215 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
219 if (!(e2 & DESC_P_MASK)) {
220 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
222 cpu_x86_load_seg_cache(env, seg_reg, selector,
223 get_seg_base(e1, e2),
224 get_seg_limit(e1, e2),
225 e2);
226 } else {
227 if (seg_reg == R_SS || seg_reg == R_CS) {
228 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
233 #define SWITCH_TSS_JMP 0
234 #define SWITCH_TSS_IRET 1
235 #define SWITCH_TSS_CALL 2
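/*
 * Hardware task switch.  The current register, segment and EFLAGS state
 * is written back to the outgoing TSS, the new state is read from the
 * incoming TSS (32-bit or 16-bit layout depending on its type), CR3 and
 * LDTR are reloaded, and the busy/NT bits and the back link are updated
 * according to whether the switch was caused by a JMP, a CALL/interrupt
 * or an IRET.
 */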
237 /* XXX: restore CPU state in registers (PowerPC case) */
238 static void switch_tss_ra(CPUX86State *env, int tss_selector,
239 uint32_t e1, uint32_t e2, int source,
240 uint32_t next_eip, uintptr_t retaddr)
242 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
243 target_ulong tss_base;
244 uint32_t new_regs[8], new_segs[6];
245 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
246 uint32_t old_eflags, eflags_mask;
247 SegmentCache *dt;
248 int index;
249 target_ulong ptr;
251 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
252 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
253 source);
255 /* if it is a task gate, read the TSS segment it references and load it */
256 if (type == 5) {
257 if (!(e2 & DESC_P_MASK)) {
258 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
260 tss_selector = e1 >> 16;
261 if (tss_selector & 4) {
262 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
264 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
265 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
267 if (e2 & DESC_S_MASK) {
268 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
270 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
271 if ((type & 7) != 1) {
272 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
276 if (!(e2 & DESC_P_MASK)) {
277 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
280 if (type & 8) {
281 tss_limit_max = 103;
282 } else {
283 tss_limit_max = 43;
285 tss_limit = get_seg_limit(e1, e2);
286 tss_base = get_seg_base(e1, e2);
287 if ((tss_selector & 4) != 0 ||
288 tss_limit < tss_limit_max) {
289 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
291 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
292 if (old_type & 8) {
293 old_tss_limit_max = 103;
294 } else {
295 old_tss_limit_max = 43;
298 /* read all the registers from the new TSS */
299 if (type & 8) {
300 /* 32 bit */
301 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
302 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
303 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
304 for (i = 0; i < 8; i++) {
305 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
306 retaddr);
308 for (i = 0; i < 6; i++) {
309 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
310 retaddr);
312 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
313 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
314 } else {
315 /* 16 bit */
316 new_cr3 = 0;
317 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
318 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
319 for (i = 0; i < 8; i++) {
320 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
321 retaddr) | 0xffff0000;
323 for (i = 0; i < 4; i++) {
324 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
325 retaddr);
327 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
328 new_segs[R_FS] = 0;
329 new_segs[R_GS] = 0;
330 new_trap = 0;
332 /* XXX: avoid a compiler warning, see
333 http://support.amd.com/us/Processor_TechDocs/24593.pdf
334 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
335 (void)new_trap;
337 /* NOTE: we must avoid memory exceptions during the task switch,
338 so we make dummy accesses beforehand */
339 /* XXX: it can still fail in some cases, so a bigger hack is
340 necessary to validate the TLB after the accesses have been done */
342 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
343 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
344 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
345 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
347 /* clear busy bit (it is restartable) */
348 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
349 target_ulong ptr;
350 uint32_t e2;
352 ptr = env->gdt.base + (env->tr.selector & ~7);
353 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
354 e2 &= ~DESC_TSS_BUSY_MASK;
355 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
357 old_eflags = cpu_compute_eflags(env);
358 if (source == SWITCH_TSS_IRET) {
359 old_eflags &= ~NT_MASK;
362 /* save the current state in the old TSS */
363 if (type & 8) {
364 /* 32 bit */
365 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
374 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
375 for (i = 0; i < 6; i++) {
376 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
377 env->segs[i].selector, retaddr);
379 } else {
380 /* 16 bit */
381 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
390 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
391 for (i = 0; i < 4; i++) {
392 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
393 env->segs[i].selector, retaddr);
397 /* now if an exception occurs, it will occur in the next task
398 context */
400 if (source == SWITCH_TSS_CALL) {
401 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
402 new_eflags |= NT_MASK;
405 /* set busy bit */
406 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
407 target_ulong ptr;
408 uint32_t e2;
410 ptr = env->gdt.base + (tss_selector & ~7);
411 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
412 e2 |= DESC_TSS_BUSY_MASK;
413 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
416 /* set the new CPU state */
417 /* from this point, any exception that occurs can cause problems */
418 env->cr[0] |= CR0_TS_MASK;
419 env->hflags |= HF_TS_MASK;
420 env->tr.selector = tss_selector;
421 env->tr.base = tss_base;
422 env->tr.limit = tss_limit;
423 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
426 cpu_x86_update_cr3(env, new_cr3);
429 /* first load the registers that cannot fault, then reload the ones
430 that may raise exceptions */
431 env->eip = new_eip;
432 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
433 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
434 if (!(type & 8)) {
435 eflags_mask &= 0xffff;
437 cpu_load_eflags(env, new_eflags, eflags_mask);
438 /* XXX: what to do in 16 bit case? */
439 env->regs[R_EAX] = new_regs[0];
440 env->regs[R_ECX] = new_regs[1];
441 env->regs[R_EDX] = new_regs[2];
442 env->regs[R_EBX] = new_regs[3];
443 env->regs[R_ESP] = new_regs[4];
444 env->regs[R_EBP] = new_regs[5];
445 env->regs[R_ESI] = new_regs[6];
446 env->regs[R_EDI] = new_regs[7];
447 if (new_eflags & VM_MASK) {
448 for (i = 0; i < 6; i++) {
449 load_seg_vm(env, i, new_segs[i]);
451 } else {
452 /* load just the selectors first, as loading the full descriptors may trigger exceptions */
453 for (i = 0; i < 6; i++) {
454 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
458 env->ldt.selector = new_ldt & ~4;
459 env->ldt.base = 0;
460 env->ldt.limit = 0;
461 env->ldt.flags = 0;
463 /* load the LDT */
464 if (new_ldt & 4) {
465 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
468 if ((new_ldt & 0xfffc) != 0) {
469 dt = &env->gdt;
470 index = new_ldt & ~7;
471 if ((index + 7) > dt->limit) {
472 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
474 ptr = dt->base + index;
475 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
476 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
477 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
478 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
480 if (!(e2 & DESC_P_MASK)) {
481 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
483 load_seg_cache_raw_dt(&env->ldt, e1, e2);
486 /* load the segments */
487 if (!(new_eflags & VM_MASK)) {
488 int cpl = new_segs[R_CS] & 3;
489 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
490 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
491 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
492 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
493 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
494 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
497 /* check that env->eip is in the CS segment limits */
498 if (new_eip > env->segs[R_CS].limit) {
499 /* XXX: different exception if CALL? */
500 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
503 #ifndef CONFIG_USER_ONLY
504 /* reset local breakpoints */
505 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
506 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
508 #endif
511 static void switch_tss(CPUX86State *env, int tss_selector,
512 uint32_t e1, uint32_t e2, int source,
513 uint32_t next_eip)
515 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
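/*
 * The B (big) bit of the SS descriptor selects a 32-bit or a 16-bit
 * stack.  get_sp_mask() turns that into a mask for ESP arithmetic, and
 * SET_ESP() writes back only the masked part of the stack pointer, so a
 * 16-bit stack only ever modifies SP and leaves the upper bits of
 * ESP/RSP untouched.
 */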
518 static inline unsigned int get_sp_mask(unsigned int e2)
520 if (e2 & DESC_B_MASK) {
521 return 0xffffffff;
522 } else {
523 return 0xffff;
527 static int exception_has_error_code(int intno)
529 switch (intno) {
530 case 8:
531 case 10:
532 case 11:
533 case 12:
534 case 13:
535 case 14:
536 case 17:
537 return 1;
539 return 0;
542 #ifdef TARGET_X86_64
543 #define SET_ESP(val, sp_mask) \
544 do { \
545 if ((sp_mask) == 0xffff) { \
546 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
547 ((val) & 0xffff); \
548 } else if ((sp_mask) == 0xffffffffLL) { \
549 env->regs[R_ESP] = (uint32_t)(val); \
550 } else { \
551 env->regs[R_ESP] = (val); \
553 } while (0)
554 #else
555 #define SET_ESP(val, sp_mask) \
556 do { \
557 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
558 ((val) & (sp_mask)); \
559 } while (0)
560 #endif
562 /* on 64-bit machines this addition can overflow, so this segment-addition
563 * macro is used to truncate the value to 32 bits whenever needed */
564 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
566 /* XXX: add an is_user flag to have proper security support */
567 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
569 sp -= 2; \
570 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
573 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
575 sp -= 4; \
576 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
579 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
581 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
582 sp += 2; \
585 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
587 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
588 sp += 4; \
591 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
592 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
593 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
594 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
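/*
 * Protected-mode interrupt/exception delivery: the 8-byte gate
 * descriptor is read from the IDT; a task gate is handled through
 * switch_tss(), while interrupt and trap gates validate the target code
 * segment, switch to the inner-level stack from the TSS when the
 * privilege level changes (pushing and clearing the data segment
 * selectors as well when coming from vm86 mode), push the old SS:ESP,
 * EFLAGS, CS:EIP and an optional error code, and finally clear IF for
 * interrupt gates.
 */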
596 /* protected mode interrupt */
597 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
598 int error_code, unsigned int next_eip,
599 int is_hw)
601 SegmentCache *dt;
602 target_ulong ptr, ssp;
603 int type, dpl, selector, ss_dpl, cpl;
604 int has_error_code, new_stack, shift;
605 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
606 uint32_t old_eip, sp_mask;
607 int vm86 = env->eflags & VM_MASK;
609 has_error_code = 0;
610 if (!is_int && !is_hw) {
611 has_error_code = exception_has_error_code(intno);
613 if (is_int) {
614 old_eip = next_eip;
615 } else {
616 old_eip = env->eip;
619 dt = &env->idt;
620 if (intno * 8 + 7 > dt->limit) {
621 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
623 ptr = dt->base + intno * 8;
624 e1 = cpu_ldl_kernel(env, ptr);
625 e2 = cpu_ldl_kernel(env, ptr + 4);
626 /* check gate type */
627 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
628 switch (type) {
629 case 5: /* task gate */
630 /* must do this check here to return the correct error code */
631 if (!(e2 & DESC_P_MASK)) {
632 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
634 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
635 if (has_error_code) {
636 int type;
637 uint32_t mask;
639 /* push the error code */
640 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
641 shift = type >> 3;
642 if (env->segs[R_SS].flags & DESC_B_MASK) {
643 mask = 0xffffffff;
644 } else {
645 mask = 0xffff;
647 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
648 ssp = env->segs[R_SS].base + esp;
649 if (shift) {
650 cpu_stl_kernel(env, ssp, error_code);
651 } else {
652 cpu_stw_kernel(env, ssp, error_code);
654 SET_ESP(esp, mask);
656 return;
657 case 6: /* 286 interrupt gate */
658 case 7: /* 286 trap gate */
659 case 14: /* 386 interrupt gate */
660 case 15: /* 386 trap gate */
661 break;
662 default:
663 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
664 break;
666 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
667 cpl = env->hflags & HF_CPL_MASK;
668 /* check privilege if software int */
669 if (is_int && dpl < cpl) {
670 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
672 /* check valid bit */
673 if (!(e2 & DESC_P_MASK)) {
674 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
676 selector = e1 >> 16;
677 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
678 if ((selector & 0xfffc) == 0) {
679 raise_exception_err(env, EXCP0D_GPF, 0);
681 if (load_segment(env, &e1, &e2, selector) != 0) {
682 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
684 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
685 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
688 if (dpl > cpl) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 if (!(e2 & DESC_P_MASK)) {
692 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
694 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
695 /* to inner privilege */
696 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
697 if ((ss & 0xfffc) == 0) {
698 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
700 if ((ss & 3) != dpl) {
701 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
703 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
704 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
706 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
707 if (ss_dpl != dpl) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 if (!(ss_e2 & DESC_S_MASK) ||
711 (ss_e2 & DESC_CS_MASK) ||
712 !(ss_e2 & DESC_W_MASK)) {
713 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
715 if (!(ss_e2 & DESC_P_MASK)) {
716 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
718 new_stack = 1;
719 sp_mask = get_sp_mask(ss_e2);
720 ssp = get_seg_base(ss_e1, ss_e2);
721 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
722 /* to same privilege */
723 if (vm86) {
724 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
726 new_stack = 0;
727 sp_mask = get_sp_mask(env->segs[R_SS].flags);
728 ssp = env->segs[R_SS].base;
729 esp = env->regs[R_ESP];
730 dpl = cpl;
731 } else {
732 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
733 new_stack = 0; /* avoid warning */
734 sp_mask = 0; /* avoid warning */
735 ssp = 0; /* avoid warning */
736 esp = 0; /* avoid warning */
739 shift = type >> 3;
741 #if 0
742 /* XXX: check that enough room is available */
743 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
744 if (vm86) {
745 push_size += 8;
747 push_size <<= shift;
748 #endif
749 if (shift == 1) {
750 if (new_stack) {
751 if (vm86) {
752 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
753 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
755 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
758 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
760 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
761 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
762 PUSHL(ssp, esp, sp_mask, old_eip);
763 if (has_error_code) {
764 PUSHL(ssp, esp, sp_mask, error_code);
766 } else {
767 if (new_stack) {
768 if (vm86) {
769 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
770 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
772 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
775 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
777 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
778 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
779 PUSHW(ssp, esp, sp_mask, old_eip);
780 if (has_error_code) {
781 PUSHW(ssp, esp, sp_mask, error_code);
785 /* interrupt gates clear the IF flag */
786 if ((type & 1) == 0) {
787 env->eflags &= ~IF_MASK;
789 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
791 if (new_stack) {
792 if (vm86) {
793 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
798 ss = (ss & ~3) | dpl;
799 cpu_x86_load_seg_cache(env, R_SS, ss,
800 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
802 SET_ESP(esp, sp_mask);
804 selector = (selector & ~3) | dpl;
805 cpu_x86_load_seg_cache(env, R_CS, selector,
806 get_seg_base(e1, e2),
807 get_seg_limit(e1, e2),
808 e2);
809 env->eip = offset;
812 #ifdef TARGET_X86_64
814 #define PUSHQ_RA(sp, val, ra) \
816 sp -= 8; \
817 cpu_stq_kernel_ra(env, sp, (val), ra); \
820 #define POPQ_RA(sp, val, ra) \
822 val = cpu_ldq_kernel_ra(env, sp, ra); \
823 sp += 8; \
826 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
827 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
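/*
 * In long mode the 64-bit TSS no longer holds register state; it only
 * provides stack pointers: RSP0-RSP2 at offset 4 and the seven IST
 * entries starting at offset 0x24.  get_rsp_from_tss() indexes it with
 * 8 * level + 4, where level is either the target DPL or ist + 3 for a
 * gate that uses an IST stack.
 */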
829 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
831 X86CPU *cpu = x86_env_get_cpu(env);
832 int index;
834 #if 0
835 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
836 env->tr.base, env->tr.limit);
837 #endif
839 if (!(env->tr.flags & DESC_P_MASK)) {
840 cpu_abort(CPU(cpu), "invalid tss");
842 index = 8 * level + 4;
843 if ((index + 7) > env->tr.limit) {
844 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
846 return cpu_ldq_kernel(env, env->tr.base + index);
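/*
 * 64-bit interrupt/exception delivery: IDT entries are 16 bytes, the
 * 64-bit handler offset is assembled from e1/e2/e3, and only 386-style
 * interrupt and trap gates are legal.  The CPU always pushes SS:RSP,
 * RFLAGS, CS:RIP (and an error code when applicable) on a stack that is
 * aligned down to 16 bytes and, when the IST field is non-zero or the
 * privilege level changes, taken from the 64-bit TSS.
 */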
849 /* 64 bit interrupt */
850 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
851 int error_code, target_ulong next_eip, int is_hw)
853 SegmentCache *dt;
854 target_ulong ptr;
855 int type, dpl, selector, cpl, ist;
856 int has_error_code, new_stack;
857 uint32_t e1, e2, e3, ss;
858 target_ulong old_eip, esp, offset;
860 has_error_code = 0;
861 if (!is_int && !is_hw) {
862 has_error_code = exception_has_error_code(intno);
864 if (is_int) {
865 old_eip = next_eip;
866 } else {
867 old_eip = env->eip;
870 dt = &env->idt;
871 if (intno * 16 + 15 > dt->limit) {
872 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
874 ptr = dt->base + intno * 16;
875 e1 = cpu_ldl_kernel(env, ptr);
876 e2 = cpu_ldl_kernel(env, ptr + 4);
877 e3 = cpu_ldl_kernel(env, ptr + 8);
878 /* check gate type */
879 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
880 switch (type) {
881 case 14: /* 386 interrupt gate */
882 case 15: /* 386 trap gate */
883 break;
884 default:
885 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
886 break;
888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
889 cpl = env->hflags & HF_CPL_MASK;
890 /* check privilege if software int */
891 if (is_int && dpl < cpl) {
892 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
894 /* check valid bit */
895 if (!(e2 & DESC_P_MASK)) {
896 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
898 selector = e1 >> 16;
899 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
900 ist = e2 & 7;
901 if ((selector & 0xfffc) == 0) {
902 raise_exception_err(env, EXCP0D_GPF, 0);
905 if (load_segment(env, &e1, &e2, selector) != 0) {
906 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
909 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
911 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 if (dpl > cpl) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 if (!(e2 & DESC_P_MASK)) {
916 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
918 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
919 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
921 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
922 /* to inner privilege */
923 new_stack = 1;
924 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
925 ss = 0;
926 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
927 /* to same privilege */
928 if (env->eflags & VM_MASK) {
929 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
931 new_stack = 0;
932 esp = env->regs[R_ESP];
933 dpl = cpl;
934 } else {
935 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
936 new_stack = 0; /* avoid warning */
937 esp = 0; /* avoid warning */
939 esp &= ~0xfLL; /* align stack */
941 PUSHQ(esp, env->segs[R_SS].selector);
942 PUSHQ(esp, env->regs[R_ESP]);
943 PUSHQ(esp, cpu_compute_eflags(env));
944 PUSHQ(esp, env->segs[R_CS].selector);
945 PUSHQ(esp, old_eip);
946 if (has_error_code) {
947 PUSHQ(esp, error_code);
950 /* interrupt gates clear the IF flag */
951 if ((type & 1) == 0) {
952 env->eflags &= ~IF_MASK;
954 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
956 if (new_stack) {
957 ss = 0 | dpl;
958 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
960 env->regs[R_ESP] = esp;
962 selector = (selector & ~3) | dpl;
963 cpu_x86_load_seg_cache(env, R_CS, selector,
964 get_seg_base(e1, e2),
965 get_seg_limit(e1, e2),
966 e2);
967 env->eip = offset;
969 #endif
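/*
 * SYSCALL: the new CS selector comes from bits 47:32 of MSR_STAR (SS is
 * that value + 8), the return RIP is saved in RCX and RFLAGS in R11, the
 * flags selected by MSR_SFMASK are cleared, and execution continues at
 * MSR_LSTAR (64-bit callers) or MSR_CSTAR (compatibility-mode callers).
 * Outside long mode, ECX receives the return EIP and the target is the
 * low 32 bits of MSR_STAR.  Under CONFIG_USER_ONLY the instruction is
 * simply reflected back to the main loop as EXCP_SYSCALL.
 */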
971 #ifdef TARGET_X86_64
972 #if defined(CONFIG_USER_ONLY)
973 void helper_syscall(CPUX86State *env, int next_eip_addend)
975 CPUState *cs = CPU(x86_env_get_cpu(env));
977 cs->exception_index = EXCP_SYSCALL;
978 env->exception_next_eip = env->eip + next_eip_addend;
979 cpu_loop_exit(cs);
981 #else
982 void helper_syscall(CPUX86State *env, int next_eip_addend)
984 int selector;
986 if (!(env->efer & MSR_EFER_SCE)) {
987 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
989 selector = (env->star >> 32) & 0xffff;
990 if (env->hflags & HF_LMA_MASK) {
991 int code64;
993 env->regs[R_ECX] = env->eip + next_eip_addend;
994 env->regs[11] = cpu_compute_eflags(env);
996 code64 = env->hflags & HF_CS64_MASK;
998 env->eflags &= ~env->fmask;
999 cpu_load_eflags(env, env->eflags, 0);
1000 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1001 0, 0xffffffff,
1002 DESC_G_MASK | DESC_P_MASK |
1003 DESC_S_MASK |
1004 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1005 DESC_L_MASK);
1006 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1007 0, 0xffffffff,
1008 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1009 DESC_S_MASK |
1010 DESC_W_MASK | DESC_A_MASK);
1011 if (code64) {
1012 env->eip = env->lstar;
1013 } else {
1014 env->eip = env->cstar;
1016 } else {
1017 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1019 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1020 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1021 0, 0xffffffff,
1022 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1023 DESC_S_MASK |
1024 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1025 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1026 0, 0xffffffff,
1027 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1028 DESC_S_MASK |
1029 DESC_W_MASK | DESC_A_MASK);
1030 env->eip = (uint32_t)env->star;
1033 #endif
1034 #endif
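/*
 * SYSRET: only legal at CPL 0 in protected mode.  The new CS selector is
 * taken from bits 63:48 of MSR_STAR (+16 for a 64-bit return; the SS
 * selector is +8), RIP is restored from RCX and, in long mode, RFLAGS
 * from R11; the resulting segments are forced to DPL/RPL 3.
 */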
1036 #ifdef TARGET_X86_64
1037 void helper_sysret(CPUX86State *env, int dflag)
1039 int cpl, selector;
1041 if (!(env->efer & MSR_EFER_SCE)) {
1042 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1044 cpl = env->hflags & HF_CPL_MASK;
1045 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1046 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1048 selector = (env->star >> 48) & 0xffff;
1049 if (env->hflags & HF_LMA_MASK) {
1050 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1051 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1052 NT_MASK);
1053 if (dflag == 2) {
1054 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1055 0, 0xffffffff,
1056 DESC_G_MASK | DESC_P_MASK |
1057 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1058 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1059 DESC_L_MASK);
1060 env->eip = env->regs[R_ECX];
1061 } else {
1062 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1063 0, 0xffffffff,
1064 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1065 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1066 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1067 env->eip = (uint32_t)env->regs[R_ECX];
1069 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1070 0, 0xffffffff,
1071 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1072 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1073 DESC_W_MASK | DESC_A_MASK);
1074 } else {
1075 env->eflags |= IF_MASK;
1076 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1077 0, 0xffffffff,
1078 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1079 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1080 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1081 env->eip = (uint32_t)env->regs[R_ECX];
1082 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_W_MASK | DESC_A_MASK);
1089 #endif
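/*
 * Real-mode interrupt delivery: the IVT entry is 4 bytes (16-bit offset
 * followed by the 16-bit segment), FLAGS, CS and IP are pushed on the
 * 16-bit stack, and IF, TF, AC and RF are cleared before jumping to the
 * handler.
 */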
1091 /* real mode interrupt */
1092 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1093 int error_code, unsigned int next_eip)
1095 SegmentCache *dt;
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eip;
1101 /* real mode (simpler!) */
1102 dt = &env->idt;
1103 if (intno * 4 + 3 > dt->limit) {
1104 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1106 ptr = dt->base + intno * 4;
1107 offset = cpu_lduw_kernel(env, ptr);
1108 selector = cpu_lduw_kernel(env, ptr + 2);
1109 esp = env->regs[R_ESP];
1110 ssp = env->segs[R_SS].base;
1111 if (is_int) {
1112 old_eip = next_eip;
1113 } else {
1114 old_eip = env->eip;
1116 old_cs = env->segs[R_CS].selector;
1117 /* XXX: use SS segment size? */
1118 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1119 PUSHW(ssp, esp, 0xffff, old_cs);
1120 PUSHW(ssp, esp, 0xffff, old_eip);
1122 /* update processor state */
1123 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1124 env->eip = offset;
1125 env->segs[R_CS].selector = selector;
1126 env->segs[R_CS].base = (selector << 4);
1127 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1130 #if defined(CONFIG_USER_ONLY)
1131 /* fake user mode interrupt */
1132 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1133 int error_code, target_ulong next_eip)
1135 SegmentCache *dt;
1136 target_ulong ptr;
1137 int dpl, cpl, shift;
1138 uint32_t e2;
1140 dt = &env->idt;
1141 if (env->hflags & HF_LMA_MASK) {
1142 shift = 4;
1143 } else {
1144 shift = 3;
1146 ptr = dt->base + (intno << shift);
1147 e2 = cpu_ldl_kernel(env, ptr + 4);
1149 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1150 cpl = env->hflags & HF_CPL_MASK;
1151 /* check privilege if software int */
1152 if (is_int && dpl < cpl) {
1153 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1156 /* Since we only emulate user space, we cannot do more than
1157 exit the emulation with the appropriate exception and error
1158 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1159 if (is_int || intno == EXCP_SYSCALL) {
1160 env->eip = next_eip;
1164 #else
1166 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1167 int error_code, int is_hw, int rm)
1169 CPUState *cs = CPU(x86_env_get_cpu(env));
1170 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1171 control.event_inj));
1173 if (!(event_inj & SVM_EVTINJ_VALID)) {
1174 int type;
1176 if (is_int) {
1177 type = SVM_EVTINJ_TYPE_SOFT;
1178 } else {
1179 type = SVM_EVTINJ_TYPE_EXEPT;
1181 event_inj = intno | type | SVM_EVTINJ_VALID;
1182 if (!rm && exception_has_error_code(intno)) {
1183 event_inj |= SVM_EVTINJ_VALID_ERR;
1184 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1185 control.event_inj_err),
1186 error_code);
1188 x86_stl_phys(cs,
1189 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1190 event_inj);
1193 #endif
1196 * Begin execution of an interrupt. is_int is TRUE if coming from
1197 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1198 * instruction. It is only relevant if is_int is TRUE.
1200 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1201 int error_code, target_ulong next_eip, int is_hw)
1203 CPUX86State *env = &cpu->env;
1205 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1206 if ((env->cr[0] & CR0_PE_MASK)) {
1207 static int count;
1209 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1210 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1211 count, intno, error_code, is_int,
1212 env->hflags & HF_CPL_MASK,
1213 env->segs[R_CS].selector, env->eip,
1214 (int)env->segs[R_CS].base + env->eip,
1215 env->segs[R_SS].selector, env->regs[R_ESP]);
1216 if (intno == 0x0e) {
1217 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1218 } else {
1219 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1221 qemu_log("\n");
1222 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1223 #if 0
1225 int i;
1226 target_ulong ptr;
1228 qemu_log(" code=");
1229 ptr = env->segs[R_CS].base + env->eip;
1230 for (i = 0; i < 16; i++) {
1231 qemu_log(" %02x", ldub(ptr + i));
1233 qemu_log("\n");
1235 #endif
1236 count++;
1239 if (env->cr[0] & CR0_PE_MASK) {
1240 #if !defined(CONFIG_USER_ONLY)
1241 if (env->hflags & HF_SVMI_MASK) {
1242 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1244 #endif
1245 #ifdef TARGET_X86_64
1246 if (env->hflags & HF_LMA_MASK) {
1247 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1248 } else
1249 #endif
1251 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1252 is_hw);
1254 } else {
1255 #if !defined(CONFIG_USER_ONLY)
1256 if (env->hflags & HF_SVMI_MASK) {
1257 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1259 #endif
1260 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1263 #if !defined(CONFIG_USER_ONLY)
1264 if (env->hflags & HF_SVMI_MASK) {
1265 CPUState *cs = CPU(cpu);
1266 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1267 offsetof(struct vmcb,
1268 control.event_inj));
1270 x86_stl_phys(cs,
1271 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1272 event_inj & ~SVM_EVTINJ_VALID);
1274 #endif
1277 void x86_cpu_do_interrupt(CPUState *cs)
1279 X86CPU *cpu = X86_CPU(cs);
1280 CPUX86State *env = &cpu->env;
1282 #if defined(CONFIG_USER_ONLY)
1283 /* in user-mode emulation, we simulate a fake exception
1284 which will be handled outside the cpu execution
1285 loop */
1286 do_interrupt_user(env, cs->exception_index,
1287 env->exception_is_int,
1288 env->error_code,
1289 env->exception_next_eip);
1290 /* successfully delivered */
1291 env->old_exception = -1;
1292 #else
1293 /* simulate a real cpu exception. On i386, it can
1294 trigger new exceptions, but we do not handle
1295 double or triple faults yet. */
1296 do_interrupt_all(cpu, cs->exception_index,
1297 env->exception_is_int,
1298 env->error_code,
1299 env->exception_next_eip, 0);
1300 /* successfully delivered */
1301 env->old_exception = -1;
1302 #endif
1305 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1307 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
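/*
 * Called from the cpu-exec loop when interrupt_request is non-zero.
 * Pending events are serviced in priority order: APIC poll, SIPI, SMI,
 * NMI, machine check, external interrupts (gated by GIF, EFLAGS.IF, the
 * interrupt shadow and SVM virtual-interrupt masking) and finally SVM
 * virtual interrupts.  Returns true when an event was handled so that
 * execution restarts from the new state.
 */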
1310 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1312 X86CPU *cpu = X86_CPU(cs);
1313 CPUX86State *env = &cpu->env;
1314 bool ret = false;
1316 #if !defined(CONFIG_USER_ONLY)
1317 if (interrupt_request & CPU_INTERRUPT_POLL) {
1318 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1319 apic_poll_irq(cpu->apic_state);
1320 /* Don't process multiple interrupt requests in a single call.
1321 This is required to make icount-driven execution deterministic. */
1322 return true;
1324 #endif
1325 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1326 do_cpu_sipi(cpu);
1327 } else if (env->hflags2 & HF2_GIF_MASK) {
1328 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1329 !(env->hflags & HF_SMM_MASK)) {
1330 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1331 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1332 do_smm_enter(cpu);
1333 ret = true;
1334 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1335 !(env->hflags2 & HF2_NMI_MASK)) {
1336 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1337 env->hflags2 |= HF2_NMI_MASK;
1338 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1339 ret = true;
1340 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1341 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1342 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1343 ret = true;
1344 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1345 (((env->hflags2 & HF2_VINTR_MASK) &&
1346 (env->hflags2 & HF2_HIF_MASK)) ||
1347 (!(env->hflags2 & HF2_VINTR_MASK) &&
1348 (env->eflags & IF_MASK &&
1349 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1350 int intno;
1351 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1352 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1353 CPU_INTERRUPT_VIRQ);
1354 intno = cpu_get_pic_interrupt(env);
1355 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1356 "Servicing hardware INT=0x%02x\n", intno);
1357 do_interrupt_x86_hardirq(env, intno, 1);
1358 /* ensure that no TB jump will be modified, since
1359 the program flow has changed */
1360 ret = true;
1361 #if !defined(CONFIG_USER_ONLY)
1362 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1363 (env->eflags & IF_MASK) &&
1364 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1365 int intno;
1366 /* FIXME: this should respect TPR */
1367 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1368 intno = x86_ldl_phys(cs, env->vm_vmcb
1369 + offsetof(struct vmcb, control.int_vector));
1370 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1371 "Servicing virtual hardware INT=0x%02x\n", intno);
1372 do_interrupt_x86_hardirq(env, intno, 1);
1373 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1374 ret = true;
1375 #endif
1379 return ret;
1382 void helper_enter_level(CPUX86State *env, int level, int data32,
1383 target_ulong t1)
1385 target_ulong ssp;
1386 uint32_t esp_mask, esp, ebp;
1388 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1389 ssp = env->segs[R_SS].base;
1390 ebp = env->regs[R_EBP];
1391 esp = env->regs[R_ESP];
1392 if (data32) {
1393 /* 32 bit */
1394 esp -= 4;
1395 while (--level) {
1396 esp -= 4;
1397 ebp -= 4;
1398 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1399 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1400 GETPC()),
1401 GETPC());
1403 esp -= 4;
1404 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1405 } else {
1406 /* 16 bit */
1407 esp -= 2;
1408 while (--level) {
1409 esp -= 2;
1410 ebp -= 2;
1411 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1412 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1413 GETPC()),
1414 GETPC());
1416 esp -= 2;
1417 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1421 #ifdef TARGET_X86_64
1422 void helper_enter64_level(CPUX86State *env, int level, int data64,
1423 target_ulong t1)
1425 target_ulong esp, ebp;
1427 ebp = env->regs[R_EBP];
1428 esp = env->regs[R_ESP];
1430 if (data64) {
1431 /* 64 bit */
1432 esp -= 8;
1433 while (--level) {
1434 esp -= 8;
1435 ebp -= 8;
1436 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1437 GETPC());
1439 esp -= 8;
1440 cpu_stq_data_ra(env, esp, t1, GETPC());
1441 } else {
1442 /* 16 bit */
1443 esp -= 2;
1444 while (--level) {
1445 esp -= 2;
1446 ebp -= 2;
1447 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1448 GETPC());
1450 esp -= 2;
1451 cpu_stw_data_ra(env, esp, t1, GETPC());
1454 #endif
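/*
 * LLDT and LTR below load a system descriptor from the GDT into LDTR or
 * TR.  In long mode these descriptors are 16 bytes, so the limit check
 * uses entry_limit = 15 and the upper 32 bits of the base are taken from
 * the third dword of the descriptor; LTR additionally marks the
 * descriptor as busy.
 */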
1456 void helper_lldt(CPUX86State *env, int selector)
1458 SegmentCache *dt;
1459 uint32_t e1, e2;
1460 int index, entry_limit;
1461 target_ulong ptr;
1463 selector &= 0xffff;
1464 if ((selector & 0xfffc) == 0) {
1465 /* XXX: NULL selector case: invalid LDT */
1466 env->ldt.base = 0;
1467 env->ldt.limit = 0;
1468 } else {
1469 if (selector & 0x4) {
1470 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1472 dt = &env->gdt;
1473 index = selector & ~7;
1474 #ifdef TARGET_X86_64
1475 if (env->hflags & HF_LMA_MASK) {
1476 entry_limit = 15;
1477 } else
1478 #endif
1480 entry_limit = 7;
1482 if ((index + entry_limit) > dt->limit) {
1483 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1485 ptr = dt->base + index;
1486 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1487 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1488 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1489 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1491 if (!(e2 & DESC_P_MASK)) {
1492 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1494 #ifdef TARGET_X86_64
1495 if (env->hflags & HF_LMA_MASK) {
1496 uint32_t e3;
1498 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1499 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1500 env->ldt.base |= (target_ulong)e3 << 32;
1501 } else
1502 #endif
1504 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1507 env->ldt.selector = selector;
1510 void helper_ltr(CPUX86State *env, int selector)
1512 SegmentCache *dt;
1513 uint32_t e1, e2;
1514 int index, type, entry_limit;
1515 target_ulong ptr;
1517 selector &= 0xffff;
1518 if ((selector & 0xfffc) == 0) {
1519 /* NULL selector case: invalid TR */
1520 env->tr.base = 0;
1521 env->tr.limit = 0;
1522 env->tr.flags = 0;
1523 } else {
1524 if (selector & 0x4) {
1525 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1527 dt = &env->gdt;
1528 index = selector & ~7;
1529 #ifdef TARGET_X86_64
1530 if (env->hflags & HF_LMA_MASK) {
1531 entry_limit = 15;
1532 } else
1533 #endif
1535 entry_limit = 7;
1537 if ((index + entry_limit) > dt->limit) {
1538 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1540 ptr = dt->base + index;
1541 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1542 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1543 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1544 if ((e2 & DESC_S_MASK) ||
1545 (type != 1 && type != 9)) {
1546 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1548 if (!(e2 & DESC_P_MASK)) {
1549 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1551 #ifdef TARGET_X86_64
1552 if (env->hflags & HF_LMA_MASK) {
1553 uint32_t e3, e4;
1555 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1556 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1557 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1558 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1560 load_seg_cache_raw_dt(&env->tr, e1, e2);
1561 env->tr.base |= (target_ulong)e3 << 32;
1562 } else
1563 #endif
1565 load_seg_cache_raw_dt(&env->tr, e1, e2);
1567 e2 |= DESC_TSS_BUSY_MASK;
1568 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1570 env->tr.selector = selector;
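/*
 * Protection checks for loading a data segment register (MOV/POP seg):
 * a null selector is rejected only for SS (and even then it is allowed
 * in 64-bit mode below CPL 3); SS must be a writable data segment with
 * DPL == RPL == CPL; the other registers must be readable and, unless
 * the segment is conforming code, satisfy DPL >= CPL and DPL >= RPL.
 * A clear present bit yields #SS for SS and #NP otherwise, and the
 * accessed bit is set in the descriptor on success.
 */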
1573 /* only works in protected mode and outside VM86. seg_reg must be != R_CS */
1574 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1576 uint32_t e1, e2;
1577 int cpl, dpl, rpl;
1578 SegmentCache *dt;
1579 int index;
1580 target_ulong ptr;
1582 selector &= 0xffff;
1583 cpl = env->hflags & HF_CPL_MASK;
1584 if ((selector & 0xfffc) == 0) {
1585 /* null selector case */
1586 if (seg_reg == R_SS
1587 #ifdef TARGET_X86_64
1588 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1589 #endif
1591 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1593 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1594 } else {
1596 if (selector & 0x4) {
1597 dt = &env->ldt;
1598 } else {
1599 dt = &env->gdt;
1601 index = selector & ~7;
1602 if ((index + 7) > dt->limit) {
1603 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1605 ptr = dt->base + index;
1606 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1607 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1609 if (!(e2 & DESC_S_MASK)) {
1610 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1612 rpl = selector & 3;
1613 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1614 if (seg_reg == R_SS) {
1615 /* must be writable segment */
1616 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1617 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1619 if (rpl != cpl || dpl != cpl) {
1620 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1622 } else {
1623 /* must be readable segment */
1624 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1625 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1628 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1629 /* if not conforming code, test rights */
1630 if (dpl < cpl || dpl < rpl) {
1631 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1636 if (!(e2 & DESC_P_MASK)) {
1637 if (seg_reg == R_SS) {
1638 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1639 } else {
1640 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1644 /* set the access bit if not already set */
1645 if (!(e2 & DESC_A_MASK)) {
1646 e2 |= DESC_A_MASK;
1647 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1650 cpu_x86_load_seg_cache(env, seg_reg, selector,
1651 get_seg_base(e1, e2),
1652 get_seg_limit(e1, e2),
1653 e2);
1654 #if 0
1655 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1656 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1657 #endif
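/*
 * Far JMP in protected mode: a direct jump to a code segment applies the
 * conforming (DPL <= CPL) or non-conforming (RPL <= CPL and DPL == CPL)
 * privilege rules and keeps the current CPL; a jump through a task
 * gate/TSS performs a task switch, and a jump through a call gate takes
 * the target CS:EIP from the gate and re-checks the destination code
 * segment.
 */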
1661 /* protected mode jump */
1662 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1663 target_ulong next_eip)
1665 int gate_cs, type;
1666 uint32_t e1, e2, cpl, dpl, rpl, limit;
1668 if ((new_cs & 0xfffc) == 0) {
1669 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1671 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1672 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1674 cpl = env->hflags & HF_CPL_MASK;
1675 if (e2 & DESC_S_MASK) {
1676 if (!(e2 & DESC_CS_MASK)) {
1677 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1679 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1680 if (e2 & DESC_C_MASK) {
1681 /* conforming code segment */
1682 if (dpl > cpl) {
1683 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1685 } else {
1686 /* non-conforming code segment */
1687 rpl = new_cs & 3;
1688 if (rpl > cpl) {
1689 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1691 if (dpl != cpl) {
1692 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1695 if (!(e2 & DESC_P_MASK)) {
1696 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1698 limit = get_seg_limit(e1, e2);
1699 if (new_eip > limit &&
1700 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1701 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1703 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1704 get_seg_base(e1, e2), limit, e2);
1705 env->eip = new_eip;
1706 } else {
1707 /* jump to call or task gate */
1708 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1709 rpl = new_cs & 3;
1710 cpl = env->hflags & HF_CPL_MASK;
1711 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1712 switch (type) {
1713 case 1: /* 286 TSS */
1714 case 9: /* 386 TSS */
1715 case 5: /* task gate */
1716 if (dpl < cpl || dpl < rpl) {
1717 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1719 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1720 break;
1721 case 4: /* 286 call gate */
1722 case 12: /* 386 call gate */
1723 if ((dpl < cpl) || (dpl < rpl)) {
1724 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1726 if (!(e2 & DESC_P_MASK)) {
1727 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1729 gate_cs = e1 >> 16;
1730 new_eip = (e1 & 0xffff);
1731 if (type == 12) {
1732 new_eip |= (e2 & 0xffff0000);
1734 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1735 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1737 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1738 /* must be code segment */
1739 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1740 (DESC_S_MASK | DESC_CS_MASK))) {
1741 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1743 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1744 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1745 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1747 if (!(e2 & DESC_P_MASK)) {
1748 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1750 limit = get_seg_limit(e1, e2);
1751 if (new_eip > limit) {
1752 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1754 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1755 get_seg_base(e1, e2), limit, e2);
1756 env->eip = new_eip;
1757 break;
1758 default:
1759 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1760 break;
1765 /* real mode call */
1766 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1767 int shift, int next_eip)
1769 int new_eip;
1770 uint32_t esp, esp_mask;
1771 target_ulong ssp;
1773 new_eip = new_eip1;
1774 esp = env->regs[R_ESP];
1775 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1776 ssp = env->segs[R_SS].base;
1777 if (shift) {
1778 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1779 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1780 } else {
1781 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1782 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1785 SET_ESP(esp, esp_mask);
1786 env->eip = new_eip;
1787 env->segs[R_CS].selector = new_cs;
1788 env->segs[R_CS].base = (new_cs << 4);
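/*
 * Far CALL in protected mode: calls to a code segment push the return
 * CS:EIP on the current stack, while calls through a call gate to a more
 * privileged level first switch to the stack given by the TSS, copy
 * param_count parameters from the old stack, push the old SS:ESP and
 * only then push the return address; TSS and task-gate targets go
 * through switch_tss() instead.
 */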
1791 /* protected mode call */
1792 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1793 int shift, target_ulong next_eip)
1795 int new_stack, i;
1796 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1797 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1798 uint32_t val, limit, old_sp_mask;
1799 target_ulong ssp, old_ssp;
1801 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1802 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1803 if ((new_cs & 0xfffc) == 0) {
1804 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1806 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1807 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1809 cpl = env->hflags & HF_CPL_MASK;
1810 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1811 if (e2 & DESC_S_MASK) {
1812 if (!(e2 & DESC_CS_MASK)) {
1813 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1815 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1816 if (e2 & DESC_C_MASK) {
1817 /* conforming code segment */
1818 if (dpl > cpl) {
1819 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1821 } else {
1822 /* non-conforming code segment */
1823 rpl = new_cs & 3;
1824 if (rpl > cpl) {
1825 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1827 if (dpl != cpl) {
1828 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1831 if (!(e2 & DESC_P_MASK)) {
1832 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1835 #ifdef TARGET_X86_64
1836 /* XXX: check 16/32 bit cases in long mode */
1837 if (shift == 2) {
1838 target_ulong rsp;
1840 /* 64 bit case */
1841 rsp = env->regs[R_ESP];
1842 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1843 PUSHQ_RA(rsp, next_eip, GETPC());
1844 /* from this point, not restartable */
1845 env->regs[R_ESP] = rsp;
1846 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1847 get_seg_base(e1, e2),
1848 get_seg_limit(e1, e2), e2);
1849 env->eip = new_eip;
1850 } else
1851 #endif
1853 sp = env->regs[R_ESP];
1854 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1855 ssp = env->segs[R_SS].base;
1856 if (shift) {
1857 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1858 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1859 } else {
1860 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1861 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1864 limit = get_seg_limit(e1, e2);
1865 if (new_eip > limit) {
1866 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1868 /* from this point, not restartable */
1869 SET_ESP(sp, sp_mask);
1870 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1871 get_seg_base(e1, e2), limit, e2);
1872 env->eip = new_eip;
1874 } else {
1875 /* check gate type */
1876 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1877 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1878 rpl = new_cs & 3;
1879 switch (type) {
1880 case 1: /* available 286 TSS */
1881 case 9: /* available 386 TSS */
1882 case 5: /* task gate */
1883 if (dpl < cpl || dpl < rpl) {
1884 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1886 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1887 return;
1888 case 4: /* 286 call gate */
1889 case 12: /* 386 call gate */
1890 break;
1891 default:
1892 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1893 break;
1895 shift = type >> 3;
1897 if (dpl < cpl || dpl < rpl) {
1898 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1900 /* check valid bit */
1901 if (!(e2 & DESC_P_MASK)) {
1902 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1904 selector = e1 >> 16;
1905 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1906 param_count = e2 & 0x1f;
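/* call gate fields as decoded above: target selector in e1[31:16],
   entry offset split between e1[15:0] and e2[31:16], and the number
   of parameter words to copy in the low 5 bits of e2 */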
1907 if ((selector & 0xfffc) == 0) {
1908 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1909 }
1911 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1912 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1913 }
1914 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1915 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1916 }
1917 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1918 if (dpl > cpl) {
1919 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1920 }
1921 if (!(e2 & DESC_P_MASK)) {
1922 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1923 }
1925 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1926 /* to inner privilege */
1927 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1928 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1929 TARGET_FMT_lx "\n", ss, sp, param_count,
1930 env->regs[R_ESP]);
1931 if ((ss & 0xfffc) == 0) {
1932 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1933 }
1934 if ((ss & 3) != dpl) {
1935 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1936 }
1937 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1938 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1939 }
1940 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1941 if (ss_dpl != dpl) {
1942 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1943 }
1944 if (!(ss_e2 & DESC_S_MASK) ||
1945 (ss_e2 & DESC_CS_MASK) ||
1946 !(ss_e2 & DESC_W_MASK)) {
1947 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1948 }
1949 if (!(ss_e2 & DESC_P_MASK)) {
1950 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1951 }
1953 /* push_size = ((param_count * 2) + 8) << shift; */
1955 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1956 old_ssp = env->segs[R_SS].base;
1958 sp_mask = get_sp_mask(ss_e2);
1959 ssp = get_seg_base(ss_e1, ss_e2);
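/* the stack taken from the TSS for the target DPL is now the live one:
   push the caller's SS:ESP on it, then copy param_count parameter
   words from the old stack, preserving their order */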
1960 if (shift) {
1961 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1962 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1963 for (i = param_count - 1; i >= 0; i--) {
1964 val = cpu_ldl_kernel_ra(env, old_ssp +
1965 ((env->regs[R_ESP] + i * 4) &
1966 old_sp_mask), GETPC());
1967 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1968 }
1969 } else {
1970 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1971 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1972 for (i = param_count - 1; i >= 0; i--) {
1973 val = cpu_lduw_kernel_ra(env, old_ssp +
1974 ((env->regs[R_ESP] + i * 2) &
1975 old_sp_mask), GETPC());
1976 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1977 }
1978 }
1979 new_stack = 1;
1980 } else {
1981 /* to same privilege */
1982 sp = env->regs[R_ESP];
1983 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1984 ssp = env->segs[R_SS].base;
1985 /* push_size = (4 << shift); */
1986 new_stack = 0;
1987 }
1989 if (shift) {
1990 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1991 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1992 } else {
1993 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1994 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1995 }
1997 /* from this point, not restartable */
1999 if (new_stack) {
2000 ss = (ss & ~3) | dpl;
2001 cpu_x86_load_seg_cache(env, R_SS, ss,
2002 ssp,
2003 get_seg_limit(ss_e1, ss_e2),
2004 ss_e2);
2005 }
2007 selector = (selector & ~3) | dpl;
2008 cpu_x86_load_seg_cache(env, R_CS, selector,
2009 get_seg_base(e1, e2),
2010 get_seg_limit(e1, e2),
2011 e2);
2012 SET_ESP(sp, sp_mask);
2013 env->eip = offset;
2014 }
2015 }
2017 /* real and vm86 mode iret */
2018 void helper_iret_real(CPUX86State *env, int shift)
2019 {
2020 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2021 target_ulong ssp;
2022 int eflags_mask;
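/* note that the vm86 mask below leaves IOPL out, so an iret executed
   inside vm86 mode cannot change IOPL */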
2024 sp_mask = 0xffff; /* XXX: use SS segment size? */
2025 sp = env->regs[R_ESP];
2026 ssp = env->segs[R_SS].base;
2027 if (shift == 1) {
2028 /* 32 bits */
2029 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2030 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2031 new_cs &= 0xffff;
2032 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2033 } else {
2034 /* 16 bits */
2035 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2036 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2037 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2038 }
2039 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2040 env->segs[R_CS].selector = new_cs;
2041 env->segs[R_CS].base = (new_cs << 4);
2042 env->eip = new_eip;
2043 if (env->eflags & VM_MASK) {
2044 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2045 NT_MASK;
2046 } else {
2047 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2048 RF_MASK | NT_MASK;
2049 }
2050 if (shift == 0) {
2051 eflags_mask &= 0xffff;
2052 }
2053 cpu_load_eflags(env, new_eflags, eflags_mask);
2054 env->hflags2 &= ~HF2_NMI_MASK;
2055 }
2057 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2058 {
2059 int dpl;
2060 uint32_t e2;
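/* on a return to an outer privilege level, a data segment register
   whose descriptor DPL is lower than the new CPL must be invalidated;
   conforming code segments are exempt from this rule */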
2062 /* XXX: on x86_64, we do not want to nullify FS and GS because
2063 they may still contain a valid base. I would be interested to
2064 know how a real x86_64 CPU behaves */
2065 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2066 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2067 return;
2068 }
2070 e2 = env->segs[seg_reg].flags;
2071 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2072 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2073 /* data or non-conforming code segment */
2074 if (dpl < cpl) {
2075 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2076 }
2077 }
2078 }
2080 /* protected mode iret */
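/* common tail of lret and iret: shift selects the operand size
   (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), is_iret distinguishes iret
   from lret, and addend is the extra byte count released from the
   stack by "lret imm16" */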
2081 static inline void helper_ret_protected(CPUX86State *env, int shift,
2082 int is_iret, int addend,
2083 uintptr_t retaddr)
2084 {
2085 uint32_t new_cs, new_eflags, new_ss;
2086 uint32_t new_es, new_ds, new_fs, new_gs;
2087 uint32_t e1, e2, ss_e1, ss_e2;
2088 int cpl, dpl, rpl, eflags_mask, iopl;
2089 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2091 #ifdef TARGET_X86_64
2092 if (shift == 2) {
2093 sp_mask = -1;
2094 } else
2095 #endif
2096 {
2097 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2098 }
2099 sp = env->regs[R_ESP];
2100 ssp = env->segs[R_SS].base;
2101 new_eflags = 0; /* avoid warning */
2102 #ifdef TARGET_X86_64
2103 if (shift == 2) {
2104 POPQ_RA(sp, new_eip, retaddr);
2105 POPQ_RA(sp, new_cs, retaddr);
2106 new_cs &= 0xffff;
2107 if (is_iret) {
2108 POPQ_RA(sp, new_eflags, retaddr);
2109 }
2110 } else
2111 #endif
2112 {
2113 if (shift == 1) {
2114 /* 32 bits */
2115 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2116 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2117 new_cs &= 0xffff;
2118 if (is_iret) {
2119 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2120 if (new_eflags & VM_MASK) {
2121 goto return_to_vm86;
2122 }
2123 }
2124 } else {
2125 /* 16 bits */
2126 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2127 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2128 if (is_iret) {
2129 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2130 }
2131 }
2132 }
2133 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2134 new_cs, new_eip, shift, addend);
2135 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2136 if ((new_cs & 0xfffc) == 0) {
2137 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2138 }
2139 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2140 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2141 }
2142 if (!(e2 & DESC_S_MASK) ||
2143 !(e2 & DESC_CS_MASK)) {
2144 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2145 }
2146 cpl = env->hflags & HF_CPL_MASK;
2147 rpl = new_cs & 3;
2148 if (rpl < cpl) {
2149 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2150 }
2151 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2152 if (e2 & DESC_C_MASK) {
2153 if (dpl > rpl) {
2154 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2155 }
2156 } else {
2157 if (dpl != rpl) {
2158 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2159 }
2160 }
2161 if (!(e2 & DESC_P_MASK)) {
2162 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2163 }
2165 sp += addend;
2166 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2167 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2168 /* return to same privilege level */
2169 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2170 get_seg_base(e1, e2),
2171 get_seg_limit(e1, e2),
2172 e2);
2173 } else {
2174 /* return to different privilege level */
2175 #ifdef TARGET_X86_64
2176 if (shift == 2) {
2177 POPQ_RA(sp, new_esp, retaddr);
2178 POPQ_RA(sp, new_ss, retaddr);
2179 new_ss &= 0xffff;
2180 } else
2181 #endif
2182 {
2183 if (shift == 1) {
2184 /* 32 bits */
2185 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2186 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2187 new_ss &= 0xffff;
2188 } else {
2189 /* 16 bits */
2190 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2191 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2192 }
2193 }
2194 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2195 new_ss, new_esp);
2196 if ((new_ss & 0xfffc) == 0) {
2197 #ifdef TARGET_X86_64
2198 /* a NULL ss is allowed in long mode if the target CPL (the RPL of new_cs) != 3 */
2199 /* XXX: test CS64? */
2200 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2201 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2202 0, 0xffffffff,
2203 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2204 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2205 DESC_W_MASK | DESC_A_MASK);
2206 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2207 } else
2208 #endif
2209 {
2210 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2211 }
2212 } else {
2213 if ((new_ss & 3) != rpl) {
2214 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2215 }
2216 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2217 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2218 }
2219 if (!(ss_e2 & DESC_S_MASK) ||
2220 (ss_e2 & DESC_CS_MASK) ||
2221 !(ss_e2 & DESC_W_MASK)) {
2222 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2223 }
2224 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2225 if (dpl != rpl) {
2226 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2227 }
2228 if (!(ss_e2 & DESC_P_MASK)) {
2229 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2230 }
2231 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2232 get_seg_base(ss_e1, ss_e2),
2233 get_seg_limit(ss_e1, ss_e2),
2234 ss_e2);
2235 }
2237 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2238 get_seg_base(e1, e2),
2239 get_seg_limit(e1, e2),
2240 e2);
2241 sp = new_esp;
2242 #ifdef TARGET_X86_64
2243 if (env->hflags & HF_CS64_MASK) {
2244 sp_mask = -1;
2245 } else
2246 #endif
2247 {
2248 sp_mask = get_sp_mask(ss_e2);
2249 }
2251 /* validate data segments */
2252 validate_seg(env, R_ES, rpl);
2253 validate_seg(env, R_DS, rpl);
2254 validate_seg(env, R_FS, rpl);
2255 validate_seg(env, R_GS, rpl);
2257 sp += addend;
2259 SET_ESP(sp, sp_mask);
2260 env->eip = new_eip;
2261 if (is_iret) {
2262 /* NOTE: 'cpl' is the _old_ CPL */
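/* IOPL can only change if the old CPL is 0, and IF only if the old
   CPL is at most the current IOPL; the remaining flags in the mask
   are always writable here */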
2263 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2264 if (cpl == 0) {
2265 eflags_mask |= IOPL_MASK;
2266 }
2267 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2268 if (cpl <= iopl) {
2269 eflags_mask |= IF_MASK;
2270 }
2271 if (shift == 0) {
2272 eflags_mask &= 0xffff;
2273 }
2274 cpu_load_eflags(env, new_eflags, eflags_mask);
2275 }
2276 return;
2278 return_to_vm86:
2279 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2280 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2281 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2282 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2283 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2284 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2286 /* modify processor state */
2287 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2288 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2289 VIP_MASK);
2290 load_seg_vm(env, R_CS, new_cs & 0xffff);
2291 load_seg_vm(env, R_SS, new_ss & 0xffff);
2292 load_seg_vm(env, R_ES, new_es & 0xffff);
2293 load_seg_vm(env, R_DS, new_ds & 0xffff);
2294 load_seg_vm(env, R_FS, new_fs & 0xffff);
2295 load_seg_vm(env, R_GS, new_gs & 0xffff);
2297 env->eip = new_eip & 0xffff;
2298 env->regs[R_ESP] = new_esp;
2299 }
2301 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2302 {
2303 int tss_selector, type;
2304 uint32_t e1, e2;
2306 /* specific case for TSS */
2307 if (env->eflags & NT_MASK) {
2308 #ifdef TARGET_X86_64
2309 if (env->hflags & HF_LMA_MASK) {
2310 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2311 }
2312 #endif
2313 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2314 if (tss_selector & 4) {
2315 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2316 }
2317 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2318 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2319 }
2320 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2321 /* NOTE: the 0x17 mask checks both the S bit (system segment) and the busy-TSS type, folding the 16- and 32-bit TSS types together */
2322 if (type != 3) {
2323 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2324 }
2325 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2326 } else {
2327 helper_ret_protected(env, shift, 1, 0, GETPC());
2328 }
2329 env->hflags2 &= ~HF2_NMI_MASK;
2330 }
2332 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2333 {
2334 helper_ret_protected(env, shift, 0, addend, GETPC());
2335 }
2337 void helper_sysenter(CPUX86State *env)
2338 {
2339 if (env->sysenter_cs == 0) {
2340 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2341 }
2342 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
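/* sysenter enters flat kernel segments derived from the
   IA32_SYSENTER_CS MSR: CS = sysenter_cs, SS = sysenter_cs + 8, with
   ESP and EIP taken from the SYSENTER_ESP/SYSENTER_EIP MSRs below */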
2344 #ifdef TARGET_X86_64
2345 if (env->hflags & HF_LMA_MASK) {
2346 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2347 0, 0xffffffff,
2348 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2349 DESC_S_MASK |
2350 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2351 DESC_L_MASK);
2352 } else
2353 #endif
2354 {
2355 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2356 0, 0xffffffff,
2357 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2358 DESC_S_MASK |
2359 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2360 }
2361 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2362 0, 0xffffffff,
2363 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2364 DESC_S_MASK |
2365 DESC_W_MASK | DESC_A_MASK);
2366 env->regs[R_ESP] = env->sysenter_esp;
2367 env->eip = env->sysenter_eip;
2368 }
2370 void helper_sysexit(CPUX86State *env, int dflag)
2371 {
2372 int cpl;
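/* sysexit returns to CPL 3 with selectors derived from
   IA32_SYSENTER_CS: CS = sysenter_cs + 16 (+ 32 for a 64-bit return),
   SS = CS + 8, ESP taken from ECX and EIP from EDX */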
2374 cpl = env->hflags & HF_CPL_MASK;
2375 if (env->sysenter_cs == 0 || cpl != 0) {
2376 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2377 }
2378 #ifdef TARGET_X86_64
2379 if (dflag == 2) {
2380 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2381 3, 0, 0xffffffff,
2382 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2383 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2384 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2385 DESC_L_MASK);
2386 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2387 3, 0, 0xffffffff,
2388 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2389 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2390 DESC_W_MASK | DESC_A_MASK);
2391 } else
2392 #endif
2393 {
2394 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2395 3, 0, 0xffffffff,
2396 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2397 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2398 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2399 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2400 3, 0, 0xffffffff,
2401 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2402 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2403 DESC_W_MASK | DESC_A_MASK);
2404 }
2405 env->regs[R_ESP] = env->regs[R_ECX];
2406 env->eip = env->regs[R_EDX];
2407 }
2409 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2410 {
2411 unsigned int limit;
2412 uint32_t e1, e2, eflags, selector;
2413 int rpl, dpl, cpl, type;
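/* lsl: if the descriptor is visible at the current CPL/RPL, return
   its byte-granular limit and set ZF; otherwise clear ZF and return 0
   without raising an exception */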
2415 selector = selector1 & 0xffff;
2416 eflags = cpu_cc_compute_all(env, CC_OP);
2417 if ((selector & 0xfffc) == 0) {
2418 goto fail;
2419 }
2420 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2421 goto fail;
2422 }
2423 rpl = selector & 3;
2424 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2425 cpl = env->hflags & HF_CPL_MASK;
2426 if (e2 & DESC_S_MASK) {
2427 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2428 /* conforming */
2429 } else {
2430 if (dpl < cpl || dpl < rpl) {
2431 goto fail;
2432 }
2433 }
2434 } else {
2435 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2436 switch (type) {
2437 case 1:
2438 case 2:
2439 case 3:
2440 case 9:
2441 case 11:
2442 break;
2443 default:
2444 goto fail;
2445 }
2446 if (dpl < cpl || dpl < rpl) {
2447 fail:
2448 CC_SRC = eflags & ~CC_Z;
2449 return 0;
2450 }
2451 }
2452 limit = get_seg_limit(e1, e2);
2453 CC_SRC = eflags | CC_Z;
2454 return limit;
2455 }
2457 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2458 {
2459 uint32_t e1, e2, eflags, selector;
2460 int rpl, dpl, cpl, type;
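/* lar works like lsl but returns the access-rights bytes
   (e2 & 0x00f0ff00) instead of the limit, and additionally accepts
   gate descriptors (types 4, 5 and 12), which have no limit */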
2462 selector = selector1 & 0xffff;
2463 eflags = cpu_cc_compute_all(env, CC_OP);
2464 if ((selector & 0xfffc) == 0) {
2465 goto fail;
2466 }
2467 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2468 goto fail;
2469 }
2470 rpl = selector & 3;
2471 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2472 cpl = env->hflags & HF_CPL_MASK;
2473 if (e2 & DESC_S_MASK) {
2474 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2475 /* conforming */
2476 } else {
2477 if (dpl < cpl || dpl < rpl) {
2478 goto fail;
2479 }
2480 }
2481 } else {
2482 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2483 switch (type) {
2484 case 1:
2485 case 2:
2486 case 3:
2487 case 4:
2488 case 5:
2489 case 9:
2490 case 11:
2491 case 12:
2492 break;
2493 default:
2494 goto fail;
2495 }
2496 if (dpl < cpl || dpl < rpl) {
2497 fail:
2498 CC_SRC = eflags & ~CC_Z;
2499 return 0;
2500 }
2501 }
2502 CC_SRC = eflags | CC_Z;
2503 return e2 & 0x00f0ff00;
2504 }
2506 void helper_verr(CPUX86State *env, target_ulong selector1)
2507 {
2508 uint32_t e1, e2, eflags, selector;
2509 int rpl, dpl, cpl;
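/* verr: set ZF if the selector refers to a data segment or readable
   code segment reachable at the current CPL/RPL; conforming code
   segments are readable regardless of privilege */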
2511 selector = selector1 & 0xffff;
2512 eflags = cpu_cc_compute_all(env, CC_OP);
2513 if ((selector & 0xfffc) == 0) {
2514 goto fail;
2515 }
2516 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2517 goto fail;
2518 }
2519 if (!(e2 & DESC_S_MASK)) {
2520 goto fail;
2521 }
2522 rpl = selector & 3;
2523 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2524 cpl = env->hflags & HF_CPL_MASK;
2525 if (e2 & DESC_CS_MASK) {
2526 if (!(e2 & DESC_R_MASK)) {
2527 goto fail;
2528 }
2529 if (!(e2 & DESC_C_MASK)) {
2530 if (dpl < cpl || dpl < rpl) {
2531 goto fail;
2532 }
2533 }
2534 } else {
2535 if (dpl < cpl || dpl < rpl) {
2536 fail:
2537 CC_SRC = eflags & ~CC_Z;
2538 return;
2539 }
2540 }
2541 CC_SRC = eflags | CC_Z;
2542 }
2544 void helper_verw(CPUX86State *env, target_ulong selector1)
2545 {
2546 uint32_t e1, e2, eflags, selector;
2547 int rpl, dpl, cpl;
2549 selector = selector1 & 0xffff;
2550 eflags = cpu_cc_compute_all(env, CC_OP);
2551 if ((selector & 0xfffc) == 0) {
2552 goto fail;
2553 }
2554 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2555 goto fail;
2556 }
2557 if (!(e2 & DESC_S_MASK)) {
2558 goto fail;
2559 }
2560 rpl = selector & 3;
2561 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2562 cpl = env->hflags & HF_CPL_MASK;
2563 if (e2 & DESC_CS_MASK) {
2564 goto fail;
2565 } else {
2566 if (dpl < cpl || dpl < rpl) {
2567 goto fail;
2568 }
2569 if (!(e2 & DESC_W_MASK)) {
2570 fail:
2571 CC_SRC = eflags & ~CC_Z;
2572 return;
2573 }
2574 }
2575 CC_SRC = eflags | CC_Z;
2576 }
2578 #if defined(CONFIG_USER_ONLY)
2579 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2580 {
2581 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2582 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2583 selector &= 0xffff;
2584 cpu_x86_load_seg_cache(env, seg_reg, selector,
2585 (selector << 4), 0xffff,
2586 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2587 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2588 } else {
2589 helper_load_seg(env, seg_reg, selector);
2590 }
2591 }
2592 #endif
2594 /* check if Port I/O is allowed in TSS */
2595 static inline void check_io(CPUX86State *env, int addr, int size,
2596 uintptr_t retaddr)
2597 {
2598 int io_offset, val, mask;
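/* the I/O permission bitmap starts at the 16-bit offset stored at TSS
   offset 0x66; one bit per port, and every bit covering
   [addr, addr + size) must be clear.  For example, addr = 0x3fe with
   size = 2 reads the bitmap word at offset addr >> 3 = 0x7f and
   requires bits 6 and 7 to be zero */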
2600 /* TSS must be a valid 32 bit one */
2601 if (!(env->tr.flags & DESC_P_MASK) ||
2602 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2603 env->tr.limit < 103) {
2604 goto fail;
2605 }
2606 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2607 io_offset += (addr >> 3);
2608 /* Note: the bitmap word is read as 16 bits, so both bytes must lie within the TSS limit */
2609 if ((io_offset + 1) > env->tr.limit) {
2610 goto fail;
2611 }
2612 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2613 val >>= (addr & 7);
2614 mask = (1 << size) - 1;
2615 /* all bits must be zero to allow the I/O */
2616 if ((val & mask) != 0) {
2617 fail:
2618 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2619 }
2620 }
2622 void helper_check_iob(CPUX86State *env, uint32_t t0)
2623 {
2624 check_io(env, t0, 1, GETPC());
2625 }
2627 void helper_check_iow(CPUX86State *env, uint32_t t0)
2628 {
2629 check_io(env, t0, 2, GETPC());
2630 }
2632 void helper_check_iol(CPUX86State *env, uint32_t t0)
2633 {
2634 check_io(env, t0, 4, GETPC());
2635 }