i386: interrupt poll processing
[qemu/ar7.git] / target-i386 / seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
39 #define DATA_SIZE 1
40 #include "exec/cpu_ldst_useronly_template.h"
42 #define DATA_SIZE 2
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 4
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 8
49 #include "exec/cpu_ldst_useronly_template.h"
50 #undef MEMSUFFIX
51 #else
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
54 #define DATA_SIZE 1
55 #include "exec/cpu_ldst_template.h"
57 #define DATA_SIZE 2
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 4
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 8
64 #include "exec/cpu_ldst_template.h"
65 #undef CPU_MMU_INDEX
66 #undef MEMSUFFIX
67 #endif
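/*
 * Note: the template includes above instantiate the cpu_{ld,st}*_kernel
 * and *_kernel_ra accessors used for all descriptor-table and TSS reads
 * in this file.  In the system build they go through the kernel MMU
 * index (cpu_mmu_index_kernel), i.e. with CPL 0 privileges regardless
 * of the current CPL; in the user-only build they reduce to plain guest
 * memory accesses.  The _ra variants carry a host return address so a
 * fault is attributed to the guest instruction that caused it.
 */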
 69 /* return non-zero on error */
70 static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector,
72 uintptr_t retaddr)
74 SegmentCache *dt;
75 int index;
76 target_ulong ptr;
78 if (selector & 0x4) {
79 dt = &env->ldt;
80 } else {
81 dt = &env->gdt;
83 index = selector & ~7;
84 if ((index + 7) > dt->limit) {
85 return -1;
87 ptr = dt->base + index;
88 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
89 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
90 return 0;
93 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
94 uint32_t *e2_ptr, int selector)
96 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
99 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
101 unsigned int limit;
103 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
104 if (e2 & DESC_G_MASK) {
105 limit = (limit << 12) | 0xfff;
107 return limit;
110 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
112 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
115 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
116 uint32_t e2)
118 sc->base = get_seg_base(e1, e2);
119 sc->limit = get_seg_limit(e1, e2);
120 sc->flags = e2;
123 /* init the segment cache in vm86 mode. */
124 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
126 selector &= 0xffff;
128 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
129 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
130 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
133 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
134 uint32_t *esp_ptr, int dpl,
135 uintptr_t retaddr)
137 X86CPU *cpu = x86_env_get_cpu(env);
138 int type, index, shift;
140 #if 0
142 int i;
143 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
144 for (i = 0; i < env->tr.limit; i++) {
145 printf("%02x ", env->tr.base[i]);
146 if ((i & 7) == 7) {
147 printf("\n");
150 printf("\n");
152 #endif
154 if (!(env->tr.flags & DESC_P_MASK)) {
155 cpu_abort(CPU(cpu), "invalid tss");
157 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
158 if ((type & 7) != 1) {
159 cpu_abort(CPU(cpu), "invalid tss type");
161 shift = type >> 3;
162 index = (dpl * 4 + 2) << shift;
163 if (index + (4 << shift) - 1 > env->tr.limit) {
164 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
166 if (shift == 0) {
167 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
168 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
169 } else {
170 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
171 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
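/*
 * Note: get_ss_esp_from_tss() above fetches the SS:ESP pair for
 * privilege level 'dpl' from the current TSS.  A 32-bit TSS (type bit 3
 * set, shift == 1) holds 8-byte {ESP, SS} pairs starting at offset 4,
 * a 16-bit TSS holds 4-byte {SP, SS} pairs starting at offset 2, which
 * is what index = (dpl * 4 + 2) << shift computes.  tss_load_seg()
 * below performs the segment checks needed when loading segment
 * registers during a task switch, raising #TS (or #NP for a
 * non-present segment) instead of #GP.
 */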
175 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
176 uintptr_t retaddr)
178 uint32_t e1, e2;
179 int rpl, dpl;
181 if ((selector & 0xfffc) != 0) {
182 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
183 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
185 if (!(e2 & DESC_S_MASK)) {
186 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
188 rpl = selector & 3;
189 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
190 if (seg_reg == R_CS) {
191 if (!(e2 & DESC_CS_MASK)) {
192 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
194 if (dpl != rpl) {
195 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
197 } else if (seg_reg == R_SS) {
198 /* SS must be writable data */
199 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
200 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
202 if (dpl != cpl || dpl != rpl) {
203 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
205 } else {
206 /* not readable code */
207 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
208 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
 210 /* if data or non-conforming code, check the rights */
211 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
212 if (dpl < cpl || dpl < rpl) {
213 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
217 if (!(e2 & DESC_P_MASK)) {
218 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
220 cpu_x86_load_seg_cache(env, seg_reg, selector,
221 get_seg_base(e1, e2),
222 get_seg_limit(e1, e2),
223 e2);
224 } else {
225 if (seg_reg == R_SS || seg_reg == R_CS) {
226 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
231 #define SWITCH_TSS_JMP 0
232 #define SWITCH_TSS_IRET 1
233 #define SWITCH_TSS_CALL 2
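/*
 * Note: switch_tss_ra() implements a hardware task switch.  The
 * 'source' value selects the entry path: JMP and IRET clear the busy
 * bit of the outgoing TSS descriptor, while CALL (including interrupts
 * delivered through a task gate) stores the old TR selector in the new
 * TSS back link and sets NT in the incoming EFLAGS.  A 32-bit TSS must
 * be at least 104 bytes long (limit >= 103), a 16-bit TSS at least 44.
 */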
235 /* XXX: restore CPU state in registers (PowerPC case) */
236 static void switch_tss_ra(CPUX86State *env, int tss_selector,
237 uint32_t e1, uint32_t e2, int source,
238 uint32_t next_eip, uintptr_t retaddr)
240 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
241 target_ulong tss_base;
242 uint32_t new_regs[8], new_segs[6];
243 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
244 uint32_t old_eflags, eflags_mask;
245 SegmentCache *dt;
246 int index;
247 target_ulong ptr;
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
251 source);
253 /* if task gate, we read the TSS segment and we load it */
254 if (type == 5) {
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
258 tss_selector = e1 >> 16;
259 if (tss_selector & 4) {
260 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
262 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
263 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
265 if (e2 & DESC_S_MASK) {
266 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
268 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
269 if ((type & 7) != 1) {
270 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
274 if (!(e2 & DESC_P_MASK)) {
275 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
278 if (type & 8) {
279 tss_limit_max = 103;
280 } else {
281 tss_limit_max = 43;
283 tss_limit = get_seg_limit(e1, e2);
284 tss_base = get_seg_base(e1, e2);
285 if ((tss_selector & 4) != 0 ||
286 tss_limit < tss_limit_max) {
287 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
289 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
290 if (old_type & 8) {
291 old_tss_limit_max = 103;
292 } else {
293 old_tss_limit_max = 43;
296 /* read all the registers from the new TSS */
297 if (type & 8) {
298 /* 32 bit */
299 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
300 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
301 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
302 for (i = 0; i < 8; i++) {
303 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
304 retaddr);
306 for (i = 0; i < 6; i++) {
307 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
308 retaddr);
310 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
311 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
312 } else {
313 /* 16 bit */
314 new_cr3 = 0;
315 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
316 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
317 for (i = 0; i < 8; i++) {
318 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
319 retaddr) | 0xffff0000;
321 for (i = 0; i < 4; i++) {
322 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
323 retaddr);
325 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
326 new_segs[R_FS] = 0;
327 new_segs[R_GS] = 0;
328 new_trap = 0;
330 /* XXX: avoid a compiler warning, see
331 http://support.amd.com/us/Processor_TechDocs/24593.pdf
332 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
333 (void)new_trap;
335 /* NOTE: we must avoid memory exceptions during the task switch,
336 so we make dummy accesses before */
337 /* XXX: it can still fail in some cases, so a bigger hack is
 338 necessary to validate the TLB after having done the accesses */
340 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
341 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
342 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
345 /* clear busy bit (it is restartable) */
346 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
347 target_ulong ptr;
348 uint32_t e2;
350 ptr = env->gdt.base + (env->tr.selector & ~7);
351 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
352 e2 &= ~DESC_TSS_BUSY_MASK;
353 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
355 old_eflags = cpu_compute_eflags(env);
356 if (source == SWITCH_TSS_IRET) {
357 old_eflags &= ~NT_MASK;
360 /* save the current state in the old TSS */
361 if (type & 8) {
362 /* 32 bit */
363 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
364 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
373 for (i = 0; i < 6; i++) {
374 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
375 env->segs[i].selector, retaddr);
377 } else {
378 /* 16 bit */
379 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
380 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
389 for (i = 0; i < 4; i++) {
390 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
391 env->segs[i].selector, retaddr);
 395 /* now if an exception occurs, it will occur in the next task
396 context */
398 if (source == SWITCH_TSS_CALL) {
399 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
400 new_eflags |= NT_MASK;
403 /* set busy bit */
404 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
405 target_ulong ptr;
406 uint32_t e2;
408 ptr = env->gdt.base + (tss_selector & ~7);
409 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
410 e2 |= DESC_TSS_BUSY_MASK;
411 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
414 /* set the new CPU state */
415 /* from this point, any exception which occurs can give problems */
416 env->cr[0] |= CR0_TS_MASK;
417 env->hflags |= HF_TS_MASK;
418 env->tr.selector = tss_selector;
419 env->tr.base = tss_base;
420 env->tr.limit = tss_limit;
421 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
424 cpu_x86_update_cr3(env, new_cr3);
 427 /* load everything that cannot fault first (registers and bare
 428 selectors), then reload the segment descriptors, which may fault */
429 env->eip = new_eip;
430 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
431 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
432 if (!(type & 8)) {
433 eflags_mask &= 0xffff;
435 cpu_load_eflags(env, new_eflags, eflags_mask);
436 /* XXX: what to do in 16 bit case? */
437 env->regs[R_EAX] = new_regs[0];
438 env->regs[R_ECX] = new_regs[1];
439 env->regs[R_EDX] = new_regs[2];
440 env->regs[R_EBX] = new_regs[3];
441 env->regs[R_ESP] = new_regs[4];
442 env->regs[R_EBP] = new_regs[5];
443 env->regs[R_ESI] = new_regs[6];
444 env->regs[R_EDI] = new_regs[7];
445 if (new_eflags & VM_MASK) {
446 for (i = 0; i < 6; i++) {
447 load_seg_vm(env, i, new_segs[i]);
449 } else {
450 /* first just selectors as the rest may trigger exceptions */
451 for (i = 0; i < 6; i++) {
452 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
456 env->ldt.selector = new_ldt & ~4;
457 env->ldt.base = 0;
458 env->ldt.limit = 0;
459 env->ldt.flags = 0;
461 /* load the LDT */
462 if (new_ldt & 4) {
463 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
466 if ((new_ldt & 0xfffc) != 0) {
467 dt = &env->gdt;
468 index = new_ldt & ~7;
469 if ((index + 7) > dt->limit) {
470 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
472 ptr = dt->base + index;
473 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
474 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
475 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
476 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
478 if (!(e2 & DESC_P_MASK)) {
479 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 int cpl = new_segs[R_CS] & 3;
487 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
488 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
489 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
490 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
491 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
492 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
495 /* check that env->eip is in the CS segment limits */
496 if (new_eip > env->segs[R_CS].limit) {
497 /* XXX: different exception if CALL? */
498 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
501 #ifndef CONFIG_USER_ONLY
502 /* reset local breakpoints */
503 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
504 for (i = 0; i < DR7_MAX_BP; i++) {
505 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
506 !hw_global_breakpoint_enabled(env->dr[7], i)) {
507 hw_breakpoint_remove(env, i);
510 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
512 #endif
515 static void switch_tss(CPUX86State *env, int tss_selector,
516 uint32_t e1, uint32_t e2, int source,
517 uint32_t next_eip)
519 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
522 static inline unsigned int get_sp_mask(unsigned int e2)
524 if (e2 & DESC_B_MASK) {
525 return 0xffffffff;
526 } else {
527 return 0xffff;
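/*
 * Note: only these exception vectors push an error code: #DF(8),
 * #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17).  The result
 * is consulted only for exceptions (!is_int && !is_hw); software INTn
 * and external interrupts never push one.
 */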
531 static int exception_has_error_code(int intno)
533 switch (intno) {
534 case 8:
535 case 10:
536 case 11:
537 case 12:
538 case 13:
539 case 14:
540 case 17:
541 return 1;
543 return 0;
546 #ifdef TARGET_X86_64
547 #define SET_ESP(val, sp_mask) \
548 do { \
549 if ((sp_mask) == 0xffff) { \
550 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
551 ((val) & 0xffff); \
552 } else if ((sp_mask) == 0xffffffffLL) { \
553 env->regs[R_ESP] = (uint32_t)(val); \
554 } else { \
555 env->regs[R_ESP] = (val); \
557 } while (0)
558 #else
559 #define SET_ESP(val, sp_mask) \
560 do { \
561 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
562 ((val) & (sp_mask)); \
563 } while (0)
564 #endif
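/*
 * Note: SET_ESP() stores a new stack pointer while honouring the stack
 * size: with a 0xffff mask only SP is replaced, with a 0xffffffff mask
 * ESP is zero-extended, and on x86_64 any other mask performs a full
 * 64-bit RSP update.
 */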
566 /* in 64-bit machines, this can overflow. So this segment addition macro
567 * can be used to trim the value to 32-bit whenever needed */
568 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
570 /* XXX: add a is_user flag to have proper security support */
571 #define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
573 sp -= 2; \
574 cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
577 #define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
579 sp -= 4; \
580 cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
583 #define POPW_RA(ssp, sp, sp_mask, val, ra) \
585 val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
586 sp += 2; \
589 #define POPL_RA(ssp, sp, sp_mask, val, ra) \
591 val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
592 sp += 4; \
595 #define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
596 #define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
597 #define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
598 #define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
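/*
 * Note (illustrative sketch, not copied from a real call site): the
 * push/pop helpers work on a stack segment base 'ssp', a stack pointer
 * variable and a wrap mask from get_sp_mask(), roughly:
 *
 *     sp_mask = get_sp_mask(env->segs[R_SS].flags);
 *     ssp = env->segs[R_SS].base;
 *     PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
 *     SET_ESP(esp, sp_mask);
 *
 * The _RA variants take a host return address (GETPC()) so that a
 * fault during the access unwinds to the guest instruction.
 * do_interrupt_protected() below uses them to build the interrupt
 * frame: GS/FS/DS/ES when leaving vm86, SS/ESP on a stack switch, then
 * EFLAGS, CS, EIP and, if applicable, the error code.
 */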
600 /* protected mode interrupt */
601 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
602 int error_code, unsigned int next_eip,
603 int is_hw)
605 SegmentCache *dt;
606 target_ulong ptr, ssp;
607 int type, dpl, selector, ss_dpl, cpl;
608 int has_error_code, new_stack, shift;
609 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
610 uint32_t old_eip, sp_mask;
611 int vm86 = env->eflags & VM_MASK;
613 has_error_code = 0;
614 if (!is_int && !is_hw) {
615 has_error_code = exception_has_error_code(intno);
617 if (is_int) {
618 old_eip = next_eip;
619 } else {
620 old_eip = env->eip;
623 dt = &env->idt;
624 if (intno * 8 + 7 > dt->limit) {
625 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
627 ptr = dt->base + intno * 8;
628 e1 = cpu_ldl_kernel(env, ptr);
629 e2 = cpu_ldl_kernel(env, ptr + 4);
630 /* check gate type */
631 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
632 switch (type) {
633 case 5: /* task gate */
634 /* must do that check here to return the correct error code */
635 if (!(e2 & DESC_P_MASK)) {
636 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
638 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
639 if (has_error_code) {
640 int type;
641 uint32_t mask;
643 /* push the error code */
644 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
645 shift = type >> 3;
646 if (env->segs[R_SS].flags & DESC_B_MASK) {
647 mask = 0xffffffff;
648 } else {
649 mask = 0xffff;
651 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
652 ssp = env->segs[R_SS].base + esp;
653 if (shift) {
654 cpu_stl_kernel(env, ssp, error_code);
655 } else {
656 cpu_stw_kernel(env, ssp, error_code);
658 SET_ESP(esp, mask);
660 return;
661 case 6: /* 286 interrupt gate */
662 case 7: /* 286 trap gate */
663 case 14: /* 386 interrupt gate */
664 case 15: /* 386 trap gate */
665 break;
666 default:
667 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
668 break;
670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
671 cpl = env->hflags & HF_CPL_MASK;
672 /* check privilege if software int */
673 if (is_int && dpl < cpl) {
674 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK)) {
678 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
680 selector = e1 >> 16;
681 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
682 if ((selector & 0xfffc) == 0) {
683 raise_exception_err(env, EXCP0D_GPF, 0);
685 if (load_segment(env, &e1, &e2, selector) != 0) {
686 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
688 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
691 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
692 if (dpl > cpl) {
693 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
695 if (!(e2 & DESC_P_MASK)) {
696 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
698 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
699 /* to inner privilege */
700 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
701 if ((ss & 0xfffc) == 0) {
702 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
704 if ((ss & 3) != dpl) {
705 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
707 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
708 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
710 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
711 if (ss_dpl != dpl) {
712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
714 if (!(ss_e2 & DESC_S_MASK) ||
715 (ss_e2 & DESC_CS_MASK) ||
716 !(ss_e2 & DESC_W_MASK)) {
717 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
719 if (!(ss_e2 & DESC_P_MASK)) {
720 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
722 new_stack = 1;
723 sp_mask = get_sp_mask(ss_e2);
724 ssp = get_seg_base(ss_e1, ss_e2);
725 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
726 /* to same privilege */
727 if (vm86) {
728 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
730 new_stack = 0;
731 sp_mask = get_sp_mask(env->segs[R_SS].flags);
732 ssp = env->segs[R_SS].base;
733 esp = env->regs[R_ESP];
734 dpl = cpl;
735 } else {
736 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
737 new_stack = 0; /* avoid warning */
738 sp_mask = 0; /* avoid warning */
739 ssp = 0; /* avoid warning */
740 esp = 0; /* avoid warning */
743 shift = type >> 3;
745 #if 0
746 /* XXX: check that enough room is available */
747 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
748 if (vm86) {
749 push_size += 8;
751 push_size <<= shift;
752 #endif
753 if (shift == 1) {
754 if (new_stack) {
755 if (vm86) {
756 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
757 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
758 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
759 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
762 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
764 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
765 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
766 PUSHL(ssp, esp, sp_mask, old_eip);
767 if (has_error_code) {
768 PUSHL(ssp, esp, sp_mask, error_code);
770 } else {
771 if (new_stack) {
772 if (vm86) {
773 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
774 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
775 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
776 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
779 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
781 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
782 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
783 PUSHW(ssp, esp, sp_mask, old_eip);
784 if (has_error_code) {
785 PUSHW(ssp, esp, sp_mask, error_code);
 789 /* interrupt gates clear the IF flag */
790 if ((type & 1) == 0) {
791 env->eflags &= ~IF_MASK;
793 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
795 if (new_stack) {
796 if (vm86) {
797 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
802 ss = (ss & ~3) | dpl;
803 cpu_x86_load_seg_cache(env, R_SS, ss,
804 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
806 SET_ESP(esp, sp_mask);
808 selector = (selector & ~3) | dpl;
809 cpu_x86_load_seg_cache(env, R_CS, selector,
810 get_seg_base(e1, e2),
811 get_seg_limit(e1, e2),
812 e2);
813 env->eip = offset;
816 #ifdef TARGET_X86_64
818 #define PUSHQ_RA(sp, val, ra) \
820 sp -= 8; \
821 cpu_stq_kernel_ra(env, sp, (val), ra); \
824 #define POPQ_RA(sp, val, ra) \
826 val = cpu_ldq_kernel_ra(env, sp, ra); \
827 sp += 8; \
830 #define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
831 #define POPQ(sp, val) POPQ_RA(sp, val, 0)
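/*
 * Note: get_rsp_from_tss() indexes the 64-bit TSS, whose stack pointer
 * table starts at offset 4 (RSP0..RSP2, one reserved quadword, then
 * IST1..IST7), hence index = 8 * level + 4.  Callers pass either the
 * target DPL or, for a gate with a non-zero IST field, ist + 3 to pick
 * the corresponding IST slot.
 */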
833 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
835 X86CPU *cpu = x86_env_get_cpu(env);
836 int index;
838 #if 0
839 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
840 env->tr.base, env->tr.limit);
841 #endif
843 if (!(env->tr.flags & DESC_P_MASK)) {
844 cpu_abort(CPU(cpu), "invalid tss");
846 index = 8 * level + 4;
847 if ((index + 7) > env->tr.limit) {
848 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
850 return cpu_ldq_kernel(env, env->tr.base + index);
853 /* 64 bit interrupt */
854 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
855 int error_code, target_ulong next_eip, int is_hw)
857 SegmentCache *dt;
858 target_ulong ptr;
859 int type, dpl, selector, cpl, ist;
860 int has_error_code, new_stack;
861 uint32_t e1, e2, e3, ss;
862 target_ulong old_eip, esp, offset;
864 has_error_code = 0;
865 if (!is_int && !is_hw) {
866 has_error_code = exception_has_error_code(intno);
868 if (is_int) {
869 old_eip = next_eip;
870 } else {
871 old_eip = env->eip;
874 dt = &env->idt;
875 if (intno * 16 + 15 > dt->limit) {
876 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = cpu_ldl_kernel(env, ptr);
880 e2 = cpu_ldl_kernel(env, ptr + 4);
881 e3 = cpu_ldl_kernel(env, ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
884 switch (type) {
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
887 break;
888 default:
889 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
890 break;
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privilege if software int */
895 if (is_int && dpl < cpl) {
896 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
898 /* check valid bit */
899 if (!(e2 & DESC_P_MASK)) {
900 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
902 selector = e1 >> 16;
903 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
904 ist = e2 & 7;
905 if ((selector & 0xfffc) == 0) {
906 raise_exception_err(env, EXCP0D_GPF, 0);
909 if (load_segment(env, &e1, &e2, selector) != 0) {
910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
912 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
913 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
915 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
916 if (dpl > cpl) {
917 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
919 if (!(e2 & DESC_P_MASK)) {
920 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
922 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
923 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
925 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
926 /* to inner privilege */
927 new_stack = 1;
928 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
929 ss = 0;
930 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
931 /* to same privilege */
932 if (env->eflags & VM_MASK) {
933 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
935 new_stack = 0;
936 esp = env->regs[R_ESP];
937 dpl = cpl;
938 } else {
939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
940 new_stack = 0; /* avoid warning */
941 esp = 0; /* avoid warning */
943 esp &= ~0xfLL; /* align stack */
945 PUSHQ(esp, env->segs[R_SS].selector);
946 PUSHQ(esp, env->regs[R_ESP]);
947 PUSHQ(esp, cpu_compute_eflags(env));
948 PUSHQ(esp, env->segs[R_CS].selector);
949 PUSHQ(esp, old_eip);
950 if (has_error_code) {
951 PUSHQ(esp, error_code);
 954 /* interrupt gates clear the IF flag */
955 if ((type & 1) == 0) {
956 env->eflags &= ~IF_MASK;
958 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
960 if (new_stack) {
961 ss = 0 | dpl;
962 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
964 env->regs[R_ESP] = esp;
966 selector = (selector & ~3) | dpl;
967 cpu_x86_load_seg_cache(env, R_CS, selector,
968 get_seg_base(e1, e2),
969 get_seg_limit(e1, e2),
970 e2);
971 env->eip = offset;
973 #endif
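/*
 * Note: helper_syscall() implements SYSCALL.  In long mode the new CS
 * selector is STAR[47:32] (SS is that value + 8), the return RIP goes
 * to RCX and the old RFLAGS to R11, RFLAGS is masked with the SFMASK
 * MSR (env->fmask), and the target RIP is LSTAR for a 64-bit caller or
 * CSTAR in compatibility mode.  Outside long mode the legacy path
 * saves the return EIP in ECX and jumps to STAR[31:0].  The user-only
 * build just raises EXCP_SYSCALL for the outer emulation loop.
 */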
975 #ifdef TARGET_X86_64
976 #if defined(CONFIG_USER_ONLY)
977 void helper_syscall(CPUX86State *env, int next_eip_addend)
979 CPUState *cs = CPU(x86_env_get_cpu(env));
981 cs->exception_index = EXCP_SYSCALL;
982 env->exception_next_eip = env->eip + next_eip_addend;
983 cpu_loop_exit(cs);
985 #else
986 void helper_syscall(CPUX86State *env, int next_eip_addend)
988 int selector;
990 if (!(env->efer & MSR_EFER_SCE)) {
991 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
993 selector = (env->star >> 32) & 0xffff;
994 if (env->hflags & HF_LMA_MASK) {
995 int code64;
997 env->regs[R_ECX] = env->eip + next_eip_addend;
998 env->regs[11] = cpu_compute_eflags(env);
1000 code64 = env->hflags & HF_CS64_MASK;
1002 env->eflags &= ~env->fmask;
1003 cpu_load_eflags(env, env->eflags, 0);
1004 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1005 0, 0xffffffff,
1006 DESC_G_MASK | DESC_P_MASK |
1007 DESC_S_MASK |
1008 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1009 DESC_L_MASK);
1010 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1011 0, 0xffffffff,
1012 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1013 DESC_S_MASK |
1014 DESC_W_MASK | DESC_A_MASK);
1015 if (code64) {
1016 env->eip = env->lstar;
1017 } else {
1018 env->eip = env->cstar;
1020 } else {
1021 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
1023 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1024 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1025 0, 0xffffffff,
1026 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1027 DESC_S_MASK |
1028 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1029 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1030 0, 0xffffffff,
1031 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1032 DESC_S_MASK |
1033 DESC_W_MASK | DESC_A_MASK);
1034 env->eip = (uint32_t)env->star;
1037 #endif
1038 #endif
1040 #ifdef TARGET_X86_64
1041 void helper_sysret(CPUX86State *env, int dflag)
1043 int cpl, selector;
1045 if (!(env->efer & MSR_EFER_SCE)) {
1046 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
1048 cpl = env->hflags & HF_CPL_MASK;
1049 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1050 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1052 selector = (env->star >> 48) & 0xffff;
1053 if (env->hflags & HF_LMA_MASK) {
1054 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1055 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1056 NT_MASK);
1057 if (dflag == 2) {
1058 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1059 0, 0xffffffff,
1060 DESC_G_MASK | DESC_P_MASK |
1061 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1062 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1063 DESC_L_MASK);
1064 env->eip = env->regs[R_ECX];
1065 } else {
1066 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1067 0, 0xffffffff,
1068 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1071 env->eip = (uint32_t)env->regs[R_ECX];
1073 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1074 0, 0xffffffff,
1075 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1076 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1077 DESC_W_MASK | DESC_A_MASK);
1078 } else {
1079 env->eflags |= IF_MASK;
1080 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1081 0, 0xffffffff,
1082 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1083 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1084 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1085 env->eip = (uint32_t)env->regs[R_ECX];
1086 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1087 0, 0xffffffff,
1088 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1089 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090 DESC_W_MASK | DESC_A_MASK);
1093 #endif
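/*
 * Note: in real mode the IDT is the legacy interrupt vector table with
 * 4 bytes per vector: the 16-bit offset followed by the 16-bit segment.
 * do_interrupt_real() below pushes only FLAGS, CS and IP and clears IF,
 * TF, AC and RF.
 */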
1095 /* real mode interrupt */
1096 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1097 int error_code, unsigned int next_eip)
1099 SegmentCache *dt;
1100 target_ulong ptr, ssp;
1101 int selector;
1102 uint32_t offset, esp;
1103 uint32_t old_cs, old_eip;
1105 /* real mode (simpler!) */
1106 dt = &env->idt;
1107 if (intno * 4 + 3 > dt->limit) {
1108 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1110 ptr = dt->base + intno * 4;
1111 offset = cpu_lduw_kernel(env, ptr);
1112 selector = cpu_lduw_kernel(env, ptr + 2);
1113 esp = env->regs[R_ESP];
1114 ssp = env->segs[R_SS].base;
1115 if (is_int) {
1116 old_eip = next_eip;
1117 } else {
1118 old_eip = env->eip;
1120 old_cs = env->segs[R_CS].selector;
1121 /* XXX: use SS segment size? */
1122 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1123 PUSHW(ssp, esp, 0xffff, old_cs);
1124 PUSHW(ssp, esp, 0xffff, old_eip);
1126 /* update processor state */
1127 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1128 env->eip = offset;
1129 env->segs[R_CS].selector = selector;
1130 env->segs[R_CS].base = (selector << 4);
1131 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1134 #if defined(CONFIG_USER_ONLY)
1135 /* fake user mode interrupt */
1136 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1137 int error_code, target_ulong next_eip)
1139 SegmentCache *dt;
1140 target_ulong ptr;
1141 int dpl, cpl, shift;
1142 uint32_t e2;
1144 dt = &env->idt;
1145 if (env->hflags & HF_LMA_MASK) {
1146 shift = 4;
1147 } else {
1148 shift = 3;
1150 ptr = dt->base + (intno << shift);
1151 e2 = cpu_ldl_kernel(env, ptr + 4);
1153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1154 cpl = env->hflags & HF_CPL_MASK;
1155 /* check privilege if software int */
1156 if (is_int && dpl < cpl) {
1157 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
 1160 /* Since we emulate only user space, we cannot do more than
 1161 exit the emulation with the appropriate exception and error
 1162 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1163 if (is_int || intno == EXCP_SYSCALL) {
1164 env->eip = next_eip;
1168 #else
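/*
 * Note: handle_even_inj() records the event being delivered in the SVM
 * VMCB event_inj field (plus event_inj_err when an error code applies),
 * so a #VMEXIT taken while delivering it reports the pending event to
 * the hypervisor; do_interrupt_all() clears the valid bit again once
 * delivery has succeeded.
 */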
1170 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1171 int error_code, int is_hw, int rm)
1173 CPUState *cs = CPU(x86_env_get_cpu(env));
1174 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1175 control.event_inj));
1177 if (!(event_inj & SVM_EVTINJ_VALID)) {
1178 int type;
1180 if (is_int) {
1181 type = SVM_EVTINJ_TYPE_SOFT;
1182 } else {
1183 type = SVM_EVTINJ_TYPE_EXEPT;
1185 event_inj = intno | type | SVM_EVTINJ_VALID;
1186 if (!rm && exception_has_error_code(intno)) {
1187 event_inj |= SVM_EVTINJ_VALID_ERR;
1188 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1189 control.event_inj_err),
1190 error_code);
1192 x86_stl_phys(cs,
1193 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1194 event_inj);
1197 #endif
1200 * Begin execution of an interruption. is_int is TRUE if coming from
1201 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1202 * instruction. It is only relevant if is_int is TRUE.
1204 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1205 int error_code, target_ulong next_eip, int is_hw)
1207 CPUX86State *env = &cpu->env;
1209 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210 if ((env->cr[0] & CR0_PE_MASK)) {
1211 static int count;
1213 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1214 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1215 count, intno, error_code, is_int,
1216 env->hflags & HF_CPL_MASK,
1217 env->segs[R_CS].selector, env->eip,
1218 (int)env->segs[R_CS].base + env->eip,
1219 env->segs[R_SS].selector, env->regs[R_ESP]);
1220 if (intno == 0x0e) {
1221 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1222 } else {
1223 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1225 qemu_log("\n");
1226 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1227 #if 0
1229 int i;
1230 target_ulong ptr;
1232 qemu_log(" code=");
1233 ptr = env->segs[R_CS].base + env->eip;
1234 for (i = 0; i < 16; i++) {
1235 qemu_log(" %02x", ldub(ptr + i));
1237 qemu_log("\n");
1239 #endif
1240 count++;
1243 if (env->cr[0] & CR0_PE_MASK) {
1244 #if !defined(CONFIG_USER_ONLY)
1245 if (env->hflags & HF_SVMI_MASK) {
1246 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1248 #endif
1249 #ifdef TARGET_X86_64
1250 if (env->hflags & HF_LMA_MASK) {
1251 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1252 } else
1253 #endif
1255 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1256 is_hw);
1258 } else {
1259 #if !defined(CONFIG_USER_ONLY)
1260 if (env->hflags & HF_SVMI_MASK) {
1261 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1263 #endif
1264 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1267 #if !defined(CONFIG_USER_ONLY)
1268 if (env->hflags & HF_SVMI_MASK) {
1269 CPUState *cs = CPU(cpu);
1270 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1271 offsetof(struct vmcb,
1272 control.event_inj));
1274 x86_stl_phys(cs,
1275 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1276 event_inj & ~SVM_EVTINJ_VALID);
1278 #endif
1281 void x86_cpu_do_interrupt(CPUState *cs)
1283 X86CPU *cpu = X86_CPU(cs);
1284 CPUX86State *env = &cpu->env;
1286 #if defined(CONFIG_USER_ONLY)
1287 /* if user mode only, we simulate a fake exception
1288 which will be handled outside the cpu execution
1289 loop */
1290 do_interrupt_user(env, cs->exception_index,
1291 env->exception_is_int,
1292 env->error_code,
1293 env->exception_next_eip);
1294 /* successfully delivered */
1295 env->old_exception = -1;
1296 #else
1297 /* simulate a real cpu exception. On i386, it can
1298 trigger new exceptions, but we do not handle
1299 double or triple faults yet. */
1300 do_interrupt_all(cpu, cs->exception_index,
1301 env->exception_is_int,
1302 env->error_code,
1303 env->exception_next_eip, 0);
1304 /* successfully delivered */
1305 env->old_exception = -1;
1306 #endif
1309 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1311 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
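/*
 * Note: x86_cpu_exec_interrupt() is the hook called from the main
 * execution loop.  CPU_INTERRUPT_POLL is handled first and returns
 * right after apic_poll_irq(), so only one request is processed per
 * call, which keeps icount-driven execution deterministic.  The other
 * requests are prioritised SIPI, SMI, NMI, MCE, external interrupts
 * (gated by IF and the interrupt-inhibit shadow, or by the SVM virtual
 * interrupt masking state), and finally virtual interrupts taken from
 * the VMCB int_vector field.
 */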
1314 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1316 X86CPU *cpu = X86_CPU(cs);
1317 CPUX86State *env = &cpu->env;
1318 bool ret = false;
1320 #if !defined(CONFIG_USER_ONLY)
1321 if (interrupt_request & CPU_INTERRUPT_POLL) {
1322 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1323 apic_poll_irq(cpu->apic_state);
1324 /* Don't process multiple interrupt requests in a single call.
1325 This is required to make icount-driven execution deterministic. */
1326 return true;
1328 #endif
1329 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1330 do_cpu_sipi(cpu);
1331 } else if (env->hflags2 & HF2_GIF_MASK) {
1332 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1333 !(env->hflags & HF_SMM_MASK)) {
1334 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1335 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1336 do_smm_enter(cpu);
1337 ret = true;
1338 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1339 !(env->hflags2 & HF2_NMI_MASK)) {
1340 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1341 env->hflags2 |= HF2_NMI_MASK;
1342 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1343 ret = true;
1344 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1345 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1346 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1347 ret = true;
1348 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1349 (((env->hflags2 & HF2_VINTR_MASK) &&
1350 (env->hflags2 & HF2_HIF_MASK)) ||
1351 (!(env->hflags2 & HF2_VINTR_MASK) &&
1352 (env->eflags & IF_MASK &&
1353 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1354 int intno;
1355 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1356 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1357 CPU_INTERRUPT_VIRQ);
1358 intno = cpu_get_pic_interrupt(env);
1359 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1360 "Servicing hardware INT=0x%02x\n", intno);
1361 do_interrupt_x86_hardirq(env, intno, 1);
1362 /* ensure that no TB jump will be modified as
1363 the program flow was changed */
1364 ret = true;
1365 #if !defined(CONFIG_USER_ONLY)
1366 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1367 (env->eflags & IF_MASK) &&
1368 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1369 int intno;
1370 /* FIXME: this should respect TPR */
1371 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1372 intno = x86_ldl_phys(cs, env->vm_vmcb
1373 + offsetof(struct vmcb, control.int_vector));
1374 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1375 "Servicing virtual hardware INT=0x%02x\n", intno);
1376 do_interrupt_x86_hardirq(env, intno, 1);
1377 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1378 ret = true;
1379 #endif
1383 return ret;
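/*
 * Note (rough description): helper_enter_level() handles the nesting
 * level of ENTER: for level > 1 it copies level - 1 saved frame
 * pointers from the old frame (addressed through EBP) to the new one,
 * then stores the new frame pointer value t1.  It only performs the
 * memory writes; the translated code itself updates ESP and EBP.
 */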
1386 void helper_enter_level(CPUX86State *env, int level, int data32,
1387 target_ulong t1)
1389 target_ulong ssp;
1390 uint32_t esp_mask, esp, ebp;
1392 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1393 ssp = env->segs[R_SS].base;
1394 ebp = env->regs[R_EBP];
1395 esp = env->regs[R_ESP];
1396 if (data32) {
1397 /* 32 bit */
1398 esp -= 4;
1399 while (--level) {
1400 esp -= 4;
1401 ebp -= 4;
1402 cpu_stl_data_ra(env, ssp + (esp & esp_mask),
1403 cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
1404 GETPC()),
1405 GETPC());
1407 esp -= 4;
1408 cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1409 } else {
1410 /* 16 bit */
1411 esp -= 2;
1412 while (--level) {
1413 esp -= 2;
1414 ebp -= 2;
1415 cpu_stw_data_ra(env, ssp + (esp & esp_mask),
1416 cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
1417 GETPC()),
1418 GETPC());
1420 esp -= 2;
1421 cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
1425 #ifdef TARGET_X86_64
1426 void helper_enter64_level(CPUX86State *env, int level, int data64,
1427 target_ulong t1)
1429 target_ulong esp, ebp;
1431 ebp = env->regs[R_EBP];
1432 esp = env->regs[R_ESP];
1434 if (data64) {
1435 /* 64 bit */
1436 esp -= 8;
1437 while (--level) {
1438 esp -= 8;
1439 ebp -= 8;
1440 cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
1441 GETPC());
1443 esp -= 8;
1444 cpu_stq_data_ra(env, esp, t1, GETPC());
1445 } else {
1446 /* 16 bit */
1447 esp -= 2;
1448 while (--level) {
1449 esp -= 2;
1450 ebp -= 2;
1451 cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
1452 GETPC());
1454 esp -= 2;
1455 cpu_stw_data_ra(env, esp, t1, GETPC());
1458 #endif
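/*
 * Note: helper_lldt() and helper_ltr() load LDTR and TR.  Both accept
 * only GDT selectors, and in long mode the descriptors are 16 bytes
 * (entry_limit 15) with the upper 32 base bits in the third descriptor
 * word.  LTR additionally requires an available TSS (type 1 or 9) and
 * marks the descriptor busy once loaded.
 */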
1460 void helper_lldt(CPUX86State *env, int selector)
1462 SegmentCache *dt;
1463 uint32_t e1, e2;
1464 int index, entry_limit;
1465 target_ulong ptr;
1467 selector &= 0xffff;
1468 if ((selector & 0xfffc) == 0) {
1469 /* XXX: NULL selector case: invalid LDT */
1470 env->ldt.base = 0;
1471 env->ldt.limit = 0;
1472 } else {
1473 if (selector & 0x4) {
1474 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1476 dt = &env->gdt;
1477 index = selector & ~7;
1478 #ifdef TARGET_X86_64
1479 if (env->hflags & HF_LMA_MASK) {
1480 entry_limit = 15;
1481 } else
1482 #endif
1484 entry_limit = 7;
1486 if ((index + entry_limit) > dt->limit) {
1487 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1489 ptr = dt->base + index;
1490 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1491 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1492 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1493 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1495 if (!(e2 & DESC_P_MASK)) {
1496 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1498 #ifdef TARGET_X86_64
1499 if (env->hflags & HF_LMA_MASK) {
1500 uint32_t e3;
1502 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1503 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1504 env->ldt.base |= (target_ulong)e3 << 32;
1505 } else
1506 #endif
1508 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1511 env->ldt.selector = selector;
1514 void helper_ltr(CPUX86State *env, int selector)
1516 SegmentCache *dt;
1517 uint32_t e1, e2;
1518 int index, type, entry_limit;
1519 target_ulong ptr;
1521 selector &= 0xffff;
1522 if ((selector & 0xfffc) == 0) {
1523 /* NULL selector case: invalid TR */
1524 env->tr.base = 0;
1525 env->tr.limit = 0;
1526 env->tr.flags = 0;
1527 } else {
1528 if (selector & 0x4) {
1529 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1531 dt = &env->gdt;
1532 index = selector & ~7;
1533 #ifdef TARGET_X86_64
1534 if (env->hflags & HF_LMA_MASK) {
1535 entry_limit = 15;
1536 } else
1537 #endif
1539 entry_limit = 7;
1541 if ((index + entry_limit) > dt->limit) {
1542 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1544 ptr = dt->base + index;
1545 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1546 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1547 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1548 if ((e2 & DESC_S_MASK) ||
1549 (type != 1 && type != 9)) {
1550 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1552 if (!(e2 & DESC_P_MASK)) {
1553 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1555 #ifdef TARGET_X86_64
1556 if (env->hflags & HF_LMA_MASK) {
1557 uint32_t e3, e4;
1559 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1560 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
1561 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1562 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1564 load_seg_cache_raw_dt(&env->tr, e1, e2);
1565 env->tr.base |= (target_ulong)e3 << 32;
1566 } else
1567 #endif
1569 load_seg_cache_raw_dt(&env->tr, e1, e2);
1571 e2 |= DESC_TSS_BUSY_MASK;
1572 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1574 env->tr.selector = selector;
1577 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1578 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1580 uint32_t e1, e2;
1581 int cpl, dpl, rpl;
1582 SegmentCache *dt;
1583 int index;
1584 target_ulong ptr;
1586 selector &= 0xffff;
1587 cpl = env->hflags & HF_CPL_MASK;
1588 if ((selector & 0xfffc) == 0) {
1589 /* null selector case */
1590 if (seg_reg == R_SS
1591 #ifdef TARGET_X86_64
1592 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1593 #endif
1595 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1597 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1598 } else {
1600 if (selector & 0x4) {
1601 dt = &env->ldt;
1602 } else {
1603 dt = &env->gdt;
1605 index = selector & ~7;
1606 if ((index + 7) > dt->limit) {
1607 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1609 ptr = dt->base + index;
1610 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1611 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
1613 if (!(e2 & DESC_S_MASK)) {
1614 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1616 rpl = selector & 3;
1617 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1618 if (seg_reg == R_SS) {
1619 /* must be writable segment */
1620 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1621 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1623 if (rpl != cpl || dpl != cpl) {
1624 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1626 } else {
1627 /* must be readable segment */
1628 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1629 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1632 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1633 /* if not conforming code, test rights */
1634 if (dpl < cpl || dpl < rpl) {
1635 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1640 if (!(e2 & DESC_P_MASK)) {
1641 if (seg_reg == R_SS) {
1642 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
1643 } else {
1644 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1648 /* set the access bit if not already set */
1649 if (!(e2 & DESC_A_MASK)) {
1650 e2 |= DESC_A_MASK;
1651 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
1654 cpu_x86_load_seg_cache(env, seg_reg, selector,
1655 get_seg_base(e1, e2),
1656 get_seg_limit(e1, e2),
1657 e2);
1658 #if 0
1659 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1660 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1661 #endif
1665 /* protected mode jump */
1666 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1667 target_ulong next_eip)
1669 int gate_cs, type;
1670 uint32_t e1, e2, cpl, dpl, rpl, limit;
1672 if ((new_cs & 0xfffc) == 0) {
1673 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1675 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1676 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1678 cpl = env->hflags & HF_CPL_MASK;
1679 if (e2 & DESC_S_MASK) {
1680 if (!(e2 & DESC_CS_MASK)) {
1681 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1683 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1684 if (e2 & DESC_C_MASK) {
1685 /* conforming code segment */
1686 if (dpl > cpl) {
1687 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1689 } else {
1690 /* non conforming code segment */
1691 rpl = new_cs & 3;
1692 if (rpl > cpl) {
1693 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1695 if (dpl != cpl) {
1696 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1699 if (!(e2 & DESC_P_MASK)) {
1700 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1702 limit = get_seg_limit(e1, e2);
1703 if (new_eip > limit &&
1704 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1705 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1707 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1708 get_seg_base(e1, e2), limit, e2);
1709 env->eip = new_eip;
1710 } else {
1711 /* jump to call or task gate */
1712 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1713 rpl = new_cs & 3;
1714 cpl = env->hflags & HF_CPL_MASK;
1715 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1716 switch (type) {
1717 case 1: /* 286 TSS */
1718 case 9: /* 386 TSS */
1719 case 5: /* task gate */
1720 if (dpl < cpl || dpl < rpl) {
1721 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1723 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
1724 break;
1725 case 4: /* 286 call gate */
1726 case 12: /* 386 call gate */
1727 if ((dpl < cpl) || (dpl < rpl)) {
1728 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1730 if (!(e2 & DESC_P_MASK)) {
1731 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1733 gate_cs = e1 >> 16;
1734 new_eip = (e1 & 0xffff);
1735 if (type == 12) {
1736 new_eip |= (e2 & 0xffff0000);
1738 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1739 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1741 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1742 /* must be code segment */
1743 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1744 (DESC_S_MASK | DESC_CS_MASK))) {
1745 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1747 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1748 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1749 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1751 if (!(e2 & DESC_P_MASK)) {
1752 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
1754 limit = get_seg_limit(e1, e2);
1755 if (new_eip > limit) {
1756 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1758 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1759 get_seg_base(e1, e2), limit, e2);
1760 env->eip = new_eip;
1761 break;
1762 default:
1763 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1764 break;
1769 /* real mode call */
1770 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1771 int shift, int next_eip)
1773 int new_eip;
1774 uint32_t esp, esp_mask;
1775 target_ulong ssp;
1777 new_eip = new_eip1;
1778 esp = env->regs[R_ESP];
1779 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1780 ssp = env->segs[R_SS].base;
1781 if (shift) {
1782 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1783 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
1784 } else {
1785 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1786 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
1789 SET_ESP(esp, esp_mask);
1790 env->eip = new_eip;
1791 env->segs[R_CS].selector = new_cs;
1792 env->segs[R_CS].base = (new_cs << 4);
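/*
 * Note: helper_lcall_protected() below handles far calls through code
 * segments, TSS and task gates, and call gates.  For a call gate to a
 * more privileged segment it switches to the stack that the TSS
 * provides for the target DPL, pushes the caller's SS:ESP, copies
 * param_count (bits 4:0 of the gate's e2 word) words or dwords from
 * the old stack, and then pushes the return CS:EIP.
 */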
1795 /* protected mode call */
1796 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1797 int shift, target_ulong next_eip)
1799 int new_stack, i;
1800 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1801 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1802 uint32_t val, limit, old_sp_mask;
1803 target_ulong ssp, old_ssp;
1805 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1806 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1807 if ((new_cs & 0xfffc) == 0) {
1808 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1810 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1811 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1813 cpl = env->hflags & HF_CPL_MASK;
1814 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1815 if (e2 & DESC_S_MASK) {
1816 if (!(e2 & DESC_CS_MASK)) {
1817 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1819 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1820 if (e2 & DESC_C_MASK) {
1821 /* conforming code segment */
1822 if (dpl > cpl) {
1823 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1825 } else {
1826 /* non conforming code segment */
1827 rpl = new_cs & 3;
1828 if (rpl > cpl) {
1829 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1831 if (dpl != cpl) {
1832 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1835 if (!(e2 & DESC_P_MASK)) {
1836 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1839 #ifdef TARGET_X86_64
1840 /* XXX: check 16/32 bit cases in long mode */
1841 if (shift == 2) {
1842 target_ulong rsp;
1844 /* 64 bit case */
1845 rsp = env->regs[R_ESP];
1846 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1847 PUSHQ_RA(rsp, next_eip, GETPC());
1848 /* from this point, not restartable */
1849 env->regs[R_ESP] = rsp;
1850 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1851 get_seg_base(e1, e2),
1852 get_seg_limit(e1, e2), e2);
1853 env->eip = new_eip;
1854 } else
1855 #endif
1857 sp = env->regs[R_ESP];
1858 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1859 ssp = env->segs[R_SS].base;
1860 if (shift) {
1861 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1862 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1863 } else {
1864 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1865 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
1868 limit = get_seg_limit(e1, e2);
1869 if (new_eip > limit) {
1870 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1872 /* from this point, not restartable */
1873 SET_ESP(sp, sp_mask);
1874 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1875 get_seg_base(e1, e2), limit, e2);
1876 env->eip = new_eip;
1878 } else {
1879 /* check gate type */
1880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1881 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1882 rpl = new_cs & 3;
1883 switch (type) {
1884 case 1: /* available 286 TSS */
1885 case 9: /* available 386 TSS */
1886 case 5: /* task gate */
1887 if (dpl < cpl || dpl < rpl) {
1888 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1890 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
1891 return;
1892 case 4: /* 286 call gate */
1893 case 12: /* 386 call gate */
1894 break;
1895 default:
1896 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1897 break;
1899 shift = type >> 3;
1901 if (dpl < cpl || dpl < rpl) {
1902 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
1904 /* check valid bit */
1905 if (!(e2 & DESC_P_MASK)) {
1906 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
1908 selector = e1 >> 16;
1909 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1910 param_count = e2 & 0x1f;
1911 if ((selector & 0xfffc) == 0) {
1912 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
1915 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1916 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1918 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1919 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1921 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1922 if (dpl > cpl) {
1923 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
1925 if (!(e2 & DESC_P_MASK)) {
1926 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
1927 }
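/* A call through a gate into a more privileged non-conforming code
   segment switches stacks: SS:ESP for the target privilege level is
   fetched from the TSS, the old SS:ESP is pushed on the new stack,
   and param_count words (or dwords for a 386 gate) of arguments are
   copied across from the old stack. */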
1929 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1930 /* to inner privilege */
1931 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
1932 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1933 TARGET_FMT_lx "\n", ss, sp, param_count,
1934 env->regs[R_ESP]);
1935 if ((ss & 0xfffc) == 0) {
1936 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1938 if ((ss & 3) != dpl) {
1939 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1941 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1942 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1944 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1945 if (ss_dpl != dpl) {
1946 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1948 if (!(ss_e2 & DESC_S_MASK) ||
1949 (ss_e2 & DESC_CS_MASK) ||
1950 !(ss_e2 & DESC_W_MASK)) {
1951 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1953 if (!(ss_e2 & DESC_P_MASK)) {
1954 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
1957 /* push_size = ((param_count * 2) + 8) << shift; */
1959 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1960 old_ssp = env->segs[R_SS].base;
1962 sp_mask = get_sp_mask(ss_e2);
1963 ssp = get_seg_base(ss_e1, ss_e2);
1964 if (shift) {
1965 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1966 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1967 for (i = param_count - 1; i >= 0; i--) {
1968 val = cpu_ldl_kernel_ra(env, old_ssp +
1969 ((env->regs[R_ESP] + i * 4) &
1970 old_sp_mask), GETPC());
1971 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
1973 } else {
1974 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1975 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
1976 for (i = param_count - 1; i >= 0; i--) {
1977 val = cpu_lduw_kernel_ra(env, old_ssp +
1978 ((env->regs[R_ESP] + i * 2) &
1979 old_sp_mask), GETPC());
1980 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
1983 new_stack = 1;
1984 } else {
1985 /* to same privilege */
1986 sp = env->regs[R_ESP];
1987 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1988 ssp = env->segs[R_SS].base;
1989 /* push_size = (4 << shift); */
1990 new_stack = 0;
1993 if (shift) {
1994 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1995 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
1996 } else {
1997 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1998 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
2001 /* from this point, not restartable */
2003 if (new_stack) {
2004 ss = (ss & ~3) | dpl;
2005 cpu_x86_load_seg_cache(env, R_SS, ss,
2006 ssp,
2007 get_seg_limit(ss_e1, ss_e2),
2008 ss_e2);
2011 selector = (selector & ~3) | dpl;
2012 cpu_x86_load_seg_cache(env, R_CS, selector,
2013 get_seg_base(e1, e2),
2014 get_seg_limit(e1, e2),
2015 e2);
2016 SET_ESP(sp, sp_mask);
2017 env->eip = offset;
2018 }
2019 }
2021 /* real and vm86 mode iret */
2022 void helper_iret_real(CPUX86State *env, int shift)
2023 {
2024 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2025 target_ulong ssp;
2026 int eflags_mask;
2028 sp_mask = 0xffff; /* XXXX: use SS segment size? */
2029 sp = env->regs[R_ESP];
2030 ssp = env->segs[R_SS].base;
2031 if (shift == 1) {
2032 /* 32 bits */
2033 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2034 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
2035 new_cs &= 0xffff;
2036 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2037 } else {
2038 /* 16 bits */
2039 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2040 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2041 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
2043 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2044 env->segs[R_CS].selector = new_cs;
2045 env->segs[R_CS].base = (new_cs << 4);
2046 env->eip = new_eip;
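/* Only part of EFLAGS is reloaded: inside vm86 mode iret cannot
   change IOPL, whereas real mode iret can; a 16-bit iret also leaves
   the upper 16 flag bits untouched. */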
2047 if (env->eflags & VM_MASK) {
2048 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2049 NT_MASK;
2050 } else {
2051 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2052 RF_MASK | NT_MASK;
2054 if (shift == 0) {
2055 eflags_mask &= 0xffff;
2057 cpu_load_eflags(env, new_eflags, eflags_mask);
2058 env->hflags2 &= ~HF2_NMI_MASK;
2059 }
2061 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2062 {
2063 int dpl;
2064 uint32_t e2;
2066 /* XXX: on x86_64, we do not want to nullify FS and GS because
2067 they may still contain a valid base. I would be interested to
2068 know how a real x86_64 CPU behaves */
2069 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2070 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2071 return;
2072 }
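/* When returning to an outer privilege level, data (and
   non-conforming code) segment registers whose DPL is below the new
   CPL are loaded with a null descriptor so the less privileged code
   cannot keep using them. */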
2074 e2 = env->segs[seg_reg].flags;
2075 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2076 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2077 /* data or non conforming code segment */
2078 if (dpl < cpl) {
2079 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2080 }
2081 }
2082 }
2084 /* protected mode iret */
2085 static inline void helper_ret_protected(CPUX86State *env, int shift,
2086 int is_iret, int addend,
2087 uintptr_t retaddr)
2088 {
2089 uint32_t new_cs, new_eflags, new_ss;
2090 uint32_t new_es, new_ds, new_fs, new_gs;
2091 uint32_t e1, e2, ss_e1, ss_e2;
2092 int cpl, dpl, rpl, eflags_mask, iopl;
2093 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2095 #ifdef TARGET_X86_64
2096 if (shift == 2) {
2097 sp_mask = -1;
2098 } else
2099 #endif
2101 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2103 sp = env->regs[R_ESP];
2104 ssp = env->segs[R_SS].base;
2105 new_eflags = 0; /* avoid warning */
2106 #ifdef TARGET_X86_64
2107 if (shift == 2) {
2108 POPQ_RA(sp, new_eip, retaddr);
2109 POPQ_RA(sp, new_cs, retaddr);
2110 new_cs &= 0xffff;
2111 if (is_iret) {
2112 POPQ_RA(sp, new_eflags, retaddr);
2114 } else
2115 #endif
2117 if (shift == 1) {
2118 /* 32 bits */
2119 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2120 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
2121 new_cs &= 0xffff;
2122 if (is_iret) {
2123 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2124 if (new_eflags & VM_MASK) {
2125 goto return_to_vm86;
2128 } else {
2129 /* 16 bits */
2130 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2131 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
2132 if (is_iret) {
2133 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
2137 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2138 new_cs, new_eip, shift, addend);
2139 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2140 if ((new_cs & 0xfffc) == 0) {
2141 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2143 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2144 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2146 if (!(e2 & DESC_S_MASK) ||
2147 !(e2 & DESC_CS_MASK)) {
2148 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2150 cpl = env->hflags & HF_CPL_MASK;
2151 rpl = new_cs & 3;
2152 if (rpl < cpl) {
2153 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2155 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2156 if (e2 & DESC_C_MASK) {
2157 if (dpl > rpl) {
2158 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2160 } else {
2161 if (dpl != rpl) {
2162 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
2165 if (!(e2 & DESC_P_MASK)) {
2166 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
2167 }
2169 sp += addend;
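/* With a 64-bit code segment, iret always pops SS:RSP even when the
   privilege level is unchanged, so the "same privilege" fast path
   below is only taken for lret in that case. */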
2170 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2171 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2172 /* return to same privilege level */
2173 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2174 get_seg_base(e1, e2),
2175 get_seg_limit(e1, e2),
2176 e2);
2177 } else {
2178 /* return to different privilege level */
2179 #ifdef TARGET_X86_64
2180 if (shift == 2) {
2181 POPQ_RA(sp, new_esp, retaddr);
2182 POPQ_RA(sp, new_ss, retaddr);
2183 new_ss &= 0xffff;
2184 } else
2185 #endif
2187 if (shift == 1) {
2188 /* 32 bits */
2189 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2190 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2191 new_ss &= 0xffff;
2192 } else {
2193 /* 16 bits */
2194 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2195 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
2198 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2199 new_ss, new_esp);
2200 if ((new_ss & 0xfffc) == 0) {
2201 #ifdef TARGET_X86_64
2202 /* NULL ss is allowed in long mode if cpl != 3 */
2203 /* XXX: test CS64? */
2204 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2205 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2206 0, 0xffffffff,
2207 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2208 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2209 DESC_W_MASK | DESC_A_MASK);
2210 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2211 } else
2212 #endif
2214 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2216 } else {
2217 if ((new_ss & 3) != rpl) {
2218 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2220 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2221 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2223 if (!(ss_e2 & DESC_S_MASK) ||
2224 (ss_e2 & DESC_CS_MASK) ||
2225 !(ss_e2 & DESC_W_MASK)) {
2226 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2228 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2229 if (dpl != rpl) {
2230 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
2232 if (!(ss_e2 & DESC_P_MASK)) {
2233 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
2235 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2236 get_seg_base(ss_e1, ss_e2),
2237 get_seg_limit(ss_e1, ss_e2),
2238 ss_e2);
2241 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2242 get_seg_base(e1, e2),
2243 get_seg_limit(e1, e2),
2244 e2);
2245 sp = new_esp;
2246 #ifdef TARGET_X86_64
2247 if (env->hflags & HF_CS64_MASK) {
2248 sp_mask = -1;
2249 } else
2250 #endif
2252 sp_mask = get_sp_mask(ss_e2);
2255 /* validate data segments */
2256 validate_seg(env, R_ES, rpl);
2257 validate_seg(env, R_DS, rpl);
2258 validate_seg(env, R_FS, rpl);
2259 validate_seg(env, R_GS, rpl);
2261 sp += addend;
2263 SET_ESP(sp, sp_mask);
2264 env->eip = new_eip;
2265 if (is_iret) {
2266 /* NOTE: 'cpl' is the _old_ CPL */
2267 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2268 if (cpl == 0) {
2269 eflags_mask |= IOPL_MASK;
2271 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2272 if (cpl <= iopl) {
2273 eflags_mask |= IF_MASK;
2275 if (shift == 0) {
2276 eflags_mask &= 0xffff;
2278 cpu_load_eflags(env, new_eflags, eflags_mask);
2279 }
2280 return;
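/* An iret whose popped EFLAGS image has VM set re-enters
   virtual-8086 mode: ESP, SS, ES, DS, FS and GS are taken from the
   remainder of the 32-bit stack frame below. */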
2282 return_to_vm86:
2283 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2284 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2285 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2286 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2287 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2288 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
2290 /* modify processor state */
2291 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2292 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2293 VIP_MASK);
2294 load_seg_vm(env, R_CS, new_cs & 0xffff);
2295 load_seg_vm(env, R_SS, new_ss & 0xffff);
2296 load_seg_vm(env, R_ES, new_es & 0xffff);
2297 load_seg_vm(env, R_DS, new_ds & 0xffff);
2298 load_seg_vm(env, R_FS, new_fs & 0xffff);
2299 load_seg_vm(env, R_GS, new_gs & 0xffff);
2301 env->eip = new_eip & 0xffff;
2302 env->regs[R_ESP] = new_esp;
2303 }
2305 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2306 {
2307 int tss_selector, type;
2308 uint32_t e1, e2;
2310 /* specific case for TSS */
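/* With NT set, iret resumes the previous task: the back-link
   selector is read from offset 0 of the current TSS and a task
   switch is performed instead of a stack return.  Nested task
   returns are not allowed in long mode. */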
2311 if (env->eflags & NT_MASK) {
2312 #ifdef TARGET_X86_64
2313 if (env->hflags & HF_LMA_MASK) {
2314 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2316 #endif
2317 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
2318 if (tss_selector & 4) {
2319 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2321 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2322 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2324 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2325 /* NOTE: we check both segment and busy TSS */
2326 if (type != 3) {
2327 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
2329 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
2330 } else {
2331 helper_ret_protected(env, shift, 1, 0, GETPC());
2333 env->hflags2 &= ~HF2_NMI_MASK;
2334 }
2336 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2337 {
2338 helper_ret_protected(env, shift, 0, addend, GETPC());
2339 }
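/* sysenter/sysexit derive flat CPL 0/CPL 3 segments from the
   SYSENTER_CS MSR: sysenter loads CS from SYSENTER_CS and SS from
   SYSENTER_CS + 8, with ESP/EIP taken from SYSENTER_ESP/EIP;
   sysexit returns to CS + 16 / SS + 24 (CS + 32 / SS + 40 for a
   64-bit sysexit) with the return ESP/EIP taken from ECX/EDX. */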
2341 void helper_sysenter(CPUX86State *env)
2342 {
2343 if (env->sysenter_cs == 0) {
2344 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2346 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2348 #ifdef TARGET_X86_64
2349 if (env->hflags & HF_LMA_MASK) {
2350 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2351 0, 0xffffffff,
2352 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2353 DESC_S_MASK |
2354 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2355 DESC_L_MASK);
2356 } else
2357 #endif
2359 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2360 0, 0xffffffff,
2361 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2362 DESC_S_MASK |
2363 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2365 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2366 0, 0xffffffff,
2367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2368 DESC_S_MASK |
2369 DESC_W_MASK | DESC_A_MASK);
2370 env->regs[R_ESP] = env->sysenter_esp;
2371 env->eip = env->sysenter_eip;
2372 }
2374 void helper_sysexit(CPUX86State *env, int dflag)
2375 {
2376 int cpl;
2378 cpl = env->hflags & HF_CPL_MASK;
2379 if (env->sysenter_cs == 0 || cpl != 0) {
2380 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
2382 #ifdef TARGET_X86_64
2383 if (dflag == 2) {
2384 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2385 3, 0, 0xffffffff,
2386 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2387 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2388 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2389 DESC_L_MASK);
2390 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2391 3, 0, 0xffffffff,
2392 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2393 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2394 DESC_W_MASK | DESC_A_MASK);
2395 } else
2396 #endif
2398 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2399 3, 0, 0xffffffff,
2400 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2402 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2403 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2404 3, 0, 0xffffffff,
2405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2406 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2407 DESC_W_MASK | DESC_A_MASK);
2409 env->regs[R_ESP] = env->regs[R_ECX];
2410 env->eip = env->regs[R_EDX];
2411 }
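/* lsl/lar/verr/verw do not fault on a bad selector: they report
   success or failure through ZF (set on success), modelled here via
   CC_SRC in the lazy flags scheme. */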
2413 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2414 {
2415 unsigned int limit;
2416 uint32_t e1, e2, eflags, selector;
2417 int rpl, dpl, cpl, type;
2419 selector = selector1 & 0xffff;
2420 eflags = cpu_cc_compute_all(env, CC_OP);
2421 if ((selector & 0xfffc) == 0) {
2422 goto fail;
2424 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2425 goto fail;
2427 rpl = selector & 3;
2428 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2429 cpl = env->hflags & HF_CPL_MASK;
2430 if (e2 & DESC_S_MASK) {
2431 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2432 /* conforming */
2433 } else {
2434 if (dpl < cpl || dpl < rpl) {
2435 goto fail;
2438 } else {
2439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2440 switch (type) {
2441 case 1:
2442 case 2:
2443 case 3:
2444 case 9:
2445 case 11:
2446 break;
2447 default:
2448 goto fail;
2450 if (dpl < cpl || dpl < rpl) {
2451 fail:
2452 CC_SRC = eflags & ~CC_Z;
2453 return 0;
2454 }
2455 }
2456 limit = get_seg_limit(e1, e2);
2457 CC_SRC = eflags | CC_Z;
2458 return limit;
2459 }
2461 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2462 {
2463 uint32_t e1, e2, eflags, selector;
2464 int rpl, dpl, cpl, type;
2466 selector = selector1 & 0xffff;
2467 eflags = cpu_cc_compute_all(env, CC_OP);
2468 if ((selector & 0xfffc) == 0) {
2469 goto fail;
2471 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2472 goto fail;
2474 rpl = selector & 3;
2475 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2476 cpl = env->hflags & HF_CPL_MASK;
2477 if (e2 & DESC_S_MASK) {
2478 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2479 /* conforming */
2480 } else {
2481 if (dpl < cpl || dpl < rpl) {
2482 goto fail;
2485 } else {
2486 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2487 switch (type) {
2488 case 1:
2489 case 2:
2490 case 3:
2491 case 4:
2492 case 5:
2493 case 9:
2494 case 11:
2495 case 12:
2496 break;
2497 default:
2498 goto fail;
2500 if (dpl < cpl || dpl < rpl) {
2501 fail:
2502 CC_SRC = eflags & ~CC_Z;
2503 return 0;
2504 }
2505 }
2506 CC_SRC = eflags | CC_Z;
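/* lar returns only the access rights fields: the type/S/DPL/P byte
   and the AVL/L/D-B/G bits, i.e. e2 with its base and limit bits
   cleared. */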
2507 return e2 & 0x00f0ff00;
2508 }
2510 void helper_verr(CPUX86State *env, target_ulong selector1)
2511 {
2512 uint32_t e1, e2, eflags, selector;
2513 int rpl, dpl, cpl;
2515 selector = selector1 & 0xffff;
2516 eflags = cpu_cc_compute_all(env, CC_OP);
2517 if ((selector & 0xfffc) == 0) {
2518 goto fail;
2520 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2521 goto fail;
2523 if (!(e2 & DESC_S_MASK)) {
2524 goto fail;
2526 rpl = selector & 3;
2527 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2528 cpl = env->hflags & HF_CPL_MASK;
2529 if (e2 & DESC_CS_MASK) {
2530 if (!(e2 & DESC_R_MASK)) {
2531 goto fail;
2533 if (!(e2 & DESC_C_MASK)) {
2534 if (dpl < cpl || dpl < rpl) {
2535 goto fail;
2538 } else {
2539 if (dpl < cpl || dpl < rpl) {
2540 fail:
2541 CC_SRC = eflags & ~CC_Z;
2542 return;
2543 }
2544 }
2545 CC_SRC = eflags | CC_Z;
2546 }
2548 void helper_verw(CPUX86State *env, target_ulong selector1)
2549 {
2550 uint32_t e1, e2, eflags, selector;
2551 int rpl, dpl, cpl;
2553 selector = selector1 & 0xffff;
2554 eflags = cpu_cc_compute_all(env, CC_OP);
2555 if ((selector & 0xfffc) == 0) {
2556 goto fail;
2558 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
2559 goto fail;
2561 if (!(e2 & DESC_S_MASK)) {
2562 goto fail;
2564 rpl = selector & 3;
2565 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2566 cpl = env->hflags & HF_CPL_MASK;
2567 if (e2 & DESC_CS_MASK) {
2568 goto fail;
2569 } else {
2570 if (dpl < cpl || dpl < rpl) {
2571 goto fail;
2573 if (!(e2 & DESC_W_MASK)) {
2574 fail:
2575 CC_SRC = eflags & ~CC_Z;
2576 return;
2577 }
2578 }
2579 CC_SRC = eflags | CC_Z;
2580 }
2582 #if defined(CONFIG_USER_ONLY)
2583 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2584 {
2585 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2586 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2587 selector &= 0xffff;
2588 cpu_x86_load_seg_cache(env, seg_reg, selector,
2589 (selector << 4), 0xffff,
2590 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2591 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2592 } else {
2593 helper_load_seg(env, seg_reg, selector);
2594 }
2595 }
2596 #endif
2598 /* check if Port I/O is allowed in TSS */
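/* The check walks the TSS I/O permission bitmap: the 16-bit word at
   TSS offset 0x66 gives the bitmap's offset within the TSS, each I/O
   port maps to one bit, and every bit covering the access must be
   clear.  Two bytes are loaded so that accesses straddling a byte
   boundary are handled with a single read. */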
2599 static inline void check_io(CPUX86State *env, int addr, int size,
2600 uintptr_t retaddr)
2601 {
2602 int io_offset, val, mask;
2604 /* TSS must be a valid 32 bit one */
2605 if (!(env->tr.flags & DESC_P_MASK) ||
2606 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2607 env->tr.limit < 103) {
2608 goto fail;
2610 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
2611 io_offset += (addr >> 3);
2612 /* Note: the check needs two bytes */
2613 if ((io_offset + 1) > env->tr.limit) {
2614 goto fail;
2616 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
2617 val >>= (addr & 7);
2618 mask = (1 << size) - 1;
2619 /* all bits must be zero to allow the I/O */
2620 if ((val & mask) != 0) {
2621 fail:
2622 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
2623 }
2624 }
2626 void helper_check_iob(CPUX86State *env, uint32_t t0)
2627 {
2628 check_io(env, t0, 1, GETPC());
2629 }
2631 void helper_check_iow(CPUX86State *env, uint32_t t0)
2632 {
2633 check_io(env, t0, 2, GETPC());
2634 }
2636 void helper_check_iol(CPUX86State *env, uint32_t t0)
2637 {
2638 check_io(env, t0, 4, GETPC());
2639 }