target-i386/seg_helper.c (qemu/ar7.git)
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
37 #ifdef CONFIG_USER_ONLY
38 #define MEMSUFFIX _kernel
39 #define DATA_SIZE 1
40 #include "exec/cpu_ldst_useronly_template.h"
42 #define DATA_SIZE 2
43 #include "exec/cpu_ldst_useronly_template.h"
45 #define DATA_SIZE 4
46 #include "exec/cpu_ldst_useronly_template.h"
48 #define DATA_SIZE 8
49 #include "exec/cpu_ldst_useronly_template.h"
50 #undef MEMSUFFIX
51 #else
52 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
53 #define MEMSUFFIX _kernel
54 #define DATA_SIZE 1
55 #include "exec/cpu_ldst_template.h"
57 #define DATA_SIZE 2
58 #include "exec/cpu_ldst_template.h"
60 #define DATA_SIZE 4
61 #include "exec/cpu_ldst_template.h"
63 #define DATA_SIZE 8
64 #include "exec/cpu_ldst_template.h"
65 #undef CPU_MMU_INDEX
66 #undef MEMSUFFIX
67 #endif
69 /* return non-zero on error */
70 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
71 uint32_t *e2_ptr, int selector)
73 SegmentCache *dt;
74 int index;
75 target_ulong ptr;
77 if (selector & 0x4) {
78 dt = &env->ldt;
79 } else {
80 dt = &env->gdt;
82 index = selector & ~7;
83 if ((index + 7) > dt->limit) {
84 return -1;
86 ptr = dt->base + index;
87 *e1_ptr = cpu_ldl_kernel(env, ptr);
88 *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
89 return 0;
92 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
94 unsigned int limit;
96 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
97 if (e2 & DESC_G_MASK) {
98 limit = (limit << 12) | 0xfff;
100 return limit;
103 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
105 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
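/* Worked example (illustrative, not part of the upstream comments):
 * get_seg_base()/get_seg_limit() decode the two 32-bit descriptor words.
 * e1 is the low word (limit[15:0], base[15:0]); e2 is the high word
 * (base[23:16], access byte, flags plus limit[19:16], base[31:24]).
 * For a flat 4 GiB code segment with e1 = 0x0000ffff, e2 = 0x00cf9a00:
 *   base  = 0x00000000
 *   limit = 0x000fffff and DESC_G_MASK is set, so the effective limit
 *           becomes (0xfffff << 12) | 0xfff = 0xffffffff
 */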
108 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
109 uint32_t e2)
111 sc->base = get_seg_base(e1, e2);
112 sc->limit = get_seg_limit(e1, e2);
113 sc->flags = e2;
116 /* init the segment cache in vm86 mode. */
117 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
119 selector &= 0xffff;
121 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
122 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
123 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
126 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
127 uint32_t *esp_ptr, int dpl)
129 X86CPU *cpu = x86_env_get_cpu(env);
130 int type, index, shift;
132 #if 0
134 int i;
135 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
136 for (i = 0; i < env->tr.limit; i++) {
137 printf("%02x ", env->tr.base[i]);
138 if ((i & 7) == 7) {
139 printf("\n");
142 printf("\n");
144 #endif
146 if (!(env->tr.flags & DESC_P_MASK)) {
147 cpu_abort(CPU(cpu), "invalid tss");
149 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
150 if ((type & 7) != 1) {
151 cpu_abort(CPU(cpu), "invalid tss type");
153 shift = type >> 3;
154 index = (dpl * 4 + 2) << shift;
155 if (index + (4 << shift) - 1 > env->tr.limit) {
156 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
158 if (shift == 0) {
159 *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
160 *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
161 } else {
162 *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
163 *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
167 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
169 uint32_t e1, e2;
170 int rpl, dpl;
172 if ((selector & 0xfffc) != 0) {
173 if (load_segment(env, &e1, &e2, selector) != 0) {
174 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
176 if (!(e2 & DESC_S_MASK)) {
177 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
179 rpl = selector & 3;
180 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
181 if (seg_reg == R_CS) {
182 if (!(e2 & DESC_CS_MASK)) {
183 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
185 if (dpl != rpl) {
186 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
188 } else if (seg_reg == R_SS) {
189 /* SS must be writable data */
190 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
191 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
193 if (dpl != cpl || dpl != rpl) {
194 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
196 } else {
197 /* not readable code */
198 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
199 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
201 /* if data or non-conforming code, check the rights */
202 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
203 if (dpl < cpl || dpl < rpl) {
204 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
208 if (!(e2 & DESC_P_MASK)) {
209 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
211 cpu_x86_load_seg_cache(env, seg_reg, selector,
212 get_seg_base(e1, e2),
213 get_seg_limit(e1, e2),
214 e2);
215 } else {
216 if (seg_reg == R_SS || seg_reg == R_CS) {
217 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
222 #define SWITCH_TSS_JMP 0
223 #define SWITCH_TSS_IRET 1
224 #define SWITCH_TSS_CALL 2
226 /* XXX: restore CPU state in registers (PowerPC case) */
227 static void switch_tss(CPUX86State *env, int tss_selector,
228 uint32_t e1, uint32_t e2, int source,
229 uint32_t next_eip)
231 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
232 target_ulong tss_base;
233 uint32_t new_regs[8], new_segs[6];
234 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
235 uint32_t old_eflags, eflags_mask;
236 SegmentCache *dt;
237 int index;
238 target_ulong ptr;
240 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
241 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
242 source);
244 /* if task gate, we read the TSS segment and we load it */
245 if (type == 5) {
246 if (!(e2 & DESC_P_MASK)) {
247 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
249 tss_selector = e1 >> 16;
250 if (tss_selector & 4) {
251 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
253 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
254 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
256 if (e2 & DESC_S_MASK) {
257 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
259 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
260 if ((type & 7) != 1) {
261 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
265 if (!(e2 & DESC_P_MASK)) {
266 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
269 if (type & 8) {
270 tss_limit_max = 103;
271 } else {
272 tss_limit_max = 43;
274 tss_limit = get_seg_limit(e1, e2);
275 tss_base = get_seg_base(e1, e2);
276 if ((tss_selector & 4) != 0 ||
277 tss_limit < tss_limit_max) {
278 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
280 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
281 if (old_type & 8) {
282 old_tss_limit_max = 103;
283 } else {
284 old_tss_limit_max = 43;
287 /* read all the registers from the new TSS */
288 if (type & 8) {
289 /* 32 bit */
290 new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
291 new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
292 new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
293 for (i = 0; i < 8; i++) {
294 new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
296 for (i = 0; i < 6; i++) {
297 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
299 new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
300 new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
301 } else {
302 /* 16 bit */
303 new_cr3 = 0;
304 new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
305 new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
306 for (i = 0; i < 8; i++) {
307 new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
308 0xffff0000;
310 for (i = 0; i < 4; i++) {
311 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
313 new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
314 new_segs[R_FS] = 0;
315 new_segs[R_GS] = 0;
316 new_trap = 0;
318 /* XXX: avoid a compiler warning, see
319 http://support.amd.com/us/Processor_TechDocs/24593.pdf
320 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
321 (void)new_trap;
323 /* NOTE: we must avoid memory exceptions during the task switch,
324 so we make dummy accesses before */
325 /* XXX: it can still fail in some cases, so a bigger hack is
326 necessary to validate the TLB after having done the accesses */
328 v1 = cpu_ldub_kernel(env, env->tr.base);
329 v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
330 cpu_stb_kernel(env, env->tr.base, v1);
331 cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);
333 /* clear busy bit (it is restartable) */
334 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
335 target_ulong ptr;
336 uint32_t e2;
338 ptr = env->gdt.base + (env->tr.selector & ~7);
339 e2 = cpu_ldl_kernel(env, ptr + 4);
340 e2 &= ~DESC_TSS_BUSY_MASK;
341 cpu_stl_kernel(env, ptr + 4, e2);
343 old_eflags = cpu_compute_eflags(env);
344 if (source == SWITCH_TSS_IRET) {
345 old_eflags &= ~NT_MASK;
348 /* save the current state in the old TSS */
349 if (type & 8) {
350 /* 32 bit */
351 cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
352 cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
353 cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
354 cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
355 cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
356 cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
357 cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
358 cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
359 cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
360 cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
361 for (i = 0; i < 6; i++) {
362 cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
363 env->segs[i].selector);
365 } else {
366 /* 16 bit */
367 cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
368 cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
369 cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
370 cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
371 cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
372 cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
373 cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
374 cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
375 cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
376 cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
377 for (i = 0; i < 4; i++) {
378 cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
379 env->segs[i].selector);
383 /* now if an exception occurs, it will occur in the next task
384 context */
386 if (source == SWITCH_TSS_CALL) {
387 cpu_stw_kernel(env, tss_base, env->tr.selector);
388 new_eflags |= NT_MASK;
391 /* set busy bit */
392 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
393 target_ulong ptr;
394 uint32_t e2;
396 ptr = env->gdt.base + (tss_selector & ~7);
397 e2 = cpu_ldl_kernel(env, ptr + 4);
398 e2 |= DESC_TSS_BUSY_MASK;
399 cpu_stl_kernel(env, ptr + 4, e2);
402 /* set the new CPU state */
403 /* from this point, any exception which occurs can give problems */
404 env->cr[0] |= CR0_TS_MASK;
405 env->hflags |= HF_TS_MASK;
406 env->tr.selector = tss_selector;
407 env->tr.base = tss_base;
408 env->tr.limit = tss_limit;
409 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
411 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
412 cpu_x86_update_cr3(env, new_cr3);
415 /* load all registers without an exception, then reload them with
416 possible exception */
417 env->eip = new_eip;
418 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
419 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
420 if (!(type & 8)) {
421 eflags_mask &= 0xffff;
423 cpu_load_eflags(env, new_eflags, eflags_mask);
424 /* XXX: what to do in 16 bit case? */
425 env->regs[R_EAX] = new_regs[0];
426 env->regs[R_ECX] = new_regs[1];
427 env->regs[R_EDX] = new_regs[2];
428 env->regs[R_EBX] = new_regs[3];
429 env->regs[R_ESP] = new_regs[4];
430 env->regs[R_EBP] = new_regs[5];
431 env->regs[R_ESI] = new_regs[6];
432 env->regs[R_EDI] = new_regs[7];
433 if (new_eflags & VM_MASK) {
434 for (i = 0; i < 6; i++) {
435 load_seg_vm(env, i, new_segs[i]);
437 } else {
438 /* first just selectors as the rest may trigger exceptions */
439 for (i = 0; i < 6; i++) {
440 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
444 env->ldt.selector = new_ldt & ~4;
445 env->ldt.base = 0;
446 env->ldt.limit = 0;
447 env->ldt.flags = 0;
449 /* load the LDT */
450 if (new_ldt & 4) {
451 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
454 if ((new_ldt & 0xfffc) != 0) {
455 dt = &env->gdt;
456 index = new_ldt & ~7;
457 if ((index + 7) > dt->limit) {
458 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
460 ptr = dt->base + index;
461 e1 = cpu_ldl_kernel(env, ptr);
462 e2 = cpu_ldl_kernel(env, ptr + 4);
463 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
464 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
466 if (!(e2 & DESC_P_MASK)) {
467 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
469 load_seg_cache_raw_dt(&env->ldt, e1, e2);
472 /* load the segments */
473 if (!(new_eflags & VM_MASK)) {
474 int cpl = new_segs[R_CS] & 3;
475 tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
476 tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
477 tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
478 tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
479 tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
480 tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
483 /* check that env->eip is in the CS segment limits */
484 if (new_eip > env->segs[R_CS].limit) {
485 /* XXX: different exception if CALL? */
486 raise_exception_err(env, EXCP0D_GPF, 0);
489 #ifndef CONFIG_USER_ONLY
490 /* reset local breakpoints */
491 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
492 for (i = 0; i < DR7_MAX_BP; i++) {
493 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
494 !hw_global_breakpoint_enabled(env->dr[7], i)) {
495 hw_breakpoint_remove(env, i);
498 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
500 #endif
503 static inline unsigned int get_sp_mask(unsigned int e2)
505 if (e2 & DESC_B_MASK) {
506 return 0xffffffff;
507 } else {
508 return 0xffff;
512 static int exception_has_error_code(int intno)
514 switch (intno) {
515 case 8:
516 case 10:
517 case 11:
518 case 12:
519 case 13:
520 case 14:
521 case 17:
522 return 1;
524 return 0;
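/* For reference: the vectors listed above are the exceptions that push an
 * error code on the stack: 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP),
 * 14 (#PF) and 17 (#AC). Every other vector falls through and returns 0. */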
527 #ifdef TARGET_X86_64
528 #define SET_ESP(val, sp_mask) \
529 do { \
530 if ((sp_mask) == 0xffff) { \
531 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
532 ((val) & 0xffff); \
533 } else if ((sp_mask) == 0xffffffffLL) { \
534 env->regs[R_ESP] = (uint32_t)(val); \
535 } else { \
536 env->regs[R_ESP] = (val); \
538 } while (0)
539 #else
540 #define SET_ESP(val, sp_mask) \
541 do { \
542 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
543 ((val) & (sp_mask)); \
544 } while (0)
545 #endif
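/* Illustrative example of SET_ESP semantics: with a 16-bit stack segment
 * (sp_mask == 0xffff) only SP is written and the upper bits of ESP/RSP are
 * preserved, e.g. ESP = 0x12340100, SET_ESP(0x0200, 0xffff) yields
 * ESP = 0x12340200. With sp_mask == 0xffffffff the value is zero-extended
 * into RSP on 64-bit targets. */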
547 /* on 64-bit machines, this can overflow, so this segment addition macro
548 * can be used to trim the value to 32-bit whenever needed */
549 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
551 /* XXX: add a is_user flag to have proper security support */
552 #define PUSHW(ssp, sp, sp_mask, val) \
554 sp -= 2; \
555 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
558 #define PUSHL(ssp, sp, sp_mask, val) \
560 sp -= 4; \
561 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
564 #define POPW(ssp, sp, sp_mask, val) \
566 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
567 sp += 2; \
570 #define POPL(ssp, sp, sp_mask, val) \
572 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
573 sp += 4; \
576 /* protected mode interrupt */
577 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
578 int error_code, unsigned int next_eip,
579 int is_hw)
581 SegmentCache *dt;
582 target_ulong ptr, ssp;
583 int type, dpl, selector, ss_dpl, cpl;
584 int has_error_code, new_stack, shift;
585 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
586 uint32_t old_eip, sp_mask;
587 int vm86 = env->eflags & VM_MASK;
589 has_error_code = 0;
590 if (!is_int && !is_hw) {
591 has_error_code = exception_has_error_code(intno);
593 if (is_int) {
594 old_eip = next_eip;
595 } else {
596 old_eip = env->eip;
599 dt = &env->idt;
600 if (intno * 8 + 7 > dt->limit) {
601 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
603 ptr = dt->base + intno * 8;
604 e1 = cpu_ldl_kernel(env, ptr);
605 e2 = cpu_ldl_kernel(env, ptr + 4);
606 /* check gate type */
607 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
608 switch (type) {
609 case 5: /* task gate */
610 /* must do that check here to return the correct error code */
611 if (!(e2 & DESC_P_MASK)) {
612 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
614 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
615 if (has_error_code) {
616 int type;
617 uint32_t mask;
619 /* push the error code */
620 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
621 shift = type >> 3;
622 if (env->segs[R_SS].flags & DESC_B_MASK) {
623 mask = 0xffffffff;
624 } else {
625 mask = 0xffff;
627 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
628 ssp = env->segs[R_SS].base + esp;
629 if (shift) {
630 cpu_stl_kernel(env, ssp, error_code);
631 } else {
632 cpu_stw_kernel(env, ssp, error_code);
634 SET_ESP(esp, mask);
636 return;
637 case 6: /* 286 interrupt gate */
638 case 7: /* 286 trap gate */
639 case 14: /* 386 interrupt gate */
640 case 15: /* 386 trap gate */
641 break;
642 default:
643 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
644 break;
646 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
647 cpl = env->hflags & HF_CPL_MASK;
648 /* check privilege if software int */
649 if (is_int && dpl < cpl) {
650 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
652 /* check valid bit */
653 if (!(e2 & DESC_P_MASK)) {
654 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
656 selector = e1 >> 16;
657 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
658 if ((selector & 0xfffc) == 0) {
659 raise_exception_err(env, EXCP0D_GPF, 0);
661 if (load_segment(env, &e1, &e2, selector) != 0) {
662 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
664 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
665 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
668 if (dpl > cpl) {
669 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
671 if (!(e2 & DESC_P_MASK)) {
672 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
674 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
675 /* to inner privilege */
676 get_ss_esp_from_tss(env, &ss, &esp, dpl);
677 if ((ss & 0xfffc) == 0) {
678 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
680 if ((ss & 3) != dpl) {
681 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
683 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
684 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
686 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
687 if (ss_dpl != dpl) {
688 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
690 if (!(ss_e2 & DESC_S_MASK) ||
691 (ss_e2 & DESC_CS_MASK) ||
692 !(ss_e2 & DESC_W_MASK)) {
693 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
695 if (!(ss_e2 & DESC_P_MASK)) {
696 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
698 new_stack = 1;
699 sp_mask = get_sp_mask(ss_e2);
700 ssp = get_seg_base(ss_e1, ss_e2);
701 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
702 /* to same privilege */
703 if (vm86) {
704 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
706 new_stack = 0;
707 sp_mask = get_sp_mask(env->segs[R_SS].flags);
708 ssp = env->segs[R_SS].base;
709 esp = env->regs[R_ESP];
710 dpl = cpl;
711 } else {
712 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
713 new_stack = 0; /* avoid warning */
714 sp_mask = 0; /* avoid warning */
715 ssp = 0; /* avoid warning */
716 esp = 0; /* avoid warning */
719 shift = type >> 3;
721 #if 0
722 /* XXX: check that enough room is available */
723 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
724 if (vm86) {
725 push_size += 8;
727 push_size <<= shift;
728 #endif
729 if (shift == 1) {
730 if (new_stack) {
731 if (vm86) {
732 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
733 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
734 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
735 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
737 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
738 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
740 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
741 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
742 PUSHL(ssp, esp, sp_mask, old_eip);
743 if (has_error_code) {
744 PUSHL(ssp, esp, sp_mask, error_code);
746 } else {
747 if (new_stack) {
748 if (vm86) {
749 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
750 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
751 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
752 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
754 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
755 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
757 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
758 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
759 PUSHW(ssp, esp, sp_mask, old_eip);
760 if (has_error_code) {
761 PUSHW(ssp, esp, sp_mask, error_code);
765 /* an interrupt gate clears the IF mask */
766 if ((type & 1) == 0) {
767 env->eflags &= ~IF_MASK;
769 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
771 if (new_stack) {
772 if (vm86) {
773 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
774 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
775 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
776 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
778 ss = (ss & ~3) | dpl;
779 cpu_x86_load_seg_cache(env, R_SS, ss,
780 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
782 SET_ESP(esp, sp_mask);
784 selector = (selector & ~3) | dpl;
785 cpu_x86_load_seg_cache(env, R_CS, selector,
786 get_seg_base(e1, e2),
787 get_seg_limit(e1, e2),
788 e2);
789 env->eip = offset;
792 #ifdef TARGET_X86_64
794 #define PUSHQ(sp, val) \
796 sp -= 8; \
797 cpu_stq_kernel(env, sp, (val)); \
800 #define POPQ(sp, val) \
802 val = cpu_ldq_kernel(env, sp); \
803 sp += 8; \
806 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
808 X86CPU *cpu = x86_env_get_cpu(env);
809 int index;
811 #if 0
812 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
813 env->tr.base, env->tr.limit);
814 #endif
816 if (!(env->tr.flags & DESC_P_MASK)) {
817 cpu_abort(CPU(cpu), "invalid tss");
819 index = 8 * level + 4;
820 if ((index + 7) > env->tr.limit) {
821 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
823 return cpu_ldq_kernel(env, env->tr.base + index);
826 /* 64 bit interrupt */
827 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
828 int error_code, target_ulong next_eip, int is_hw)
830 SegmentCache *dt;
831 target_ulong ptr;
832 int type, dpl, selector, cpl, ist;
833 int has_error_code, new_stack;
834 uint32_t e1, e2, e3, ss;
835 target_ulong old_eip, esp, offset;
837 has_error_code = 0;
838 if (!is_int && !is_hw) {
839 has_error_code = exception_has_error_code(intno);
841 if (is_int) {
842 old_eip = next_eip;
843 } else {
844 old_eip = env->eip;
847 dt = &env->idt;
848 if (intno * 16 + 15 > dt->limit) {
849 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
851 ptr = dt->base + intno * 16;
852 e1 = cpu_ldl_kernel(env, ptr);
853 e2 = cpu_ldl_kernel(env, ptr + 4);
854 e3 = cpu_ldl_kernel(env, ptr + 8);
855 /* check gate type */
856 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
857 switch (type) {
858 case 14: /* 386 interrupt gate */
859 case 15: /* 386 trap gate */
860 break;
861 default:
862 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
863 break;
865 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
866 cpl = env->hflags & HF_CPL_MASK;
867 /* check privilege if software int */
868 if (is_int && dpl < cpl) {
869 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
871 /* check valid bit */
872 if (!(e2 & DESC_P_MASK)) {
873 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
875 selector = e1 >> 16;
876 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
877 ist = e2 & 7;
878 if ((selector & 0xfffc) == 0) {
879 raise_exception_err(env, EXCP0D_GPF, 0);
882 if (load_segment(env, &e1, &e2, selector) != 0) {
883 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
885 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
886 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
889 if (dpl > cpl) {
890 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
892 if (!(e2 & DESC_P_MASK)) {
893 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
895 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
896 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
898 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
899 /* to inner privilege */
900 new_stack = 1;
901 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
902 ss = 0;
903 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
904 /* to same privilege */
905 if (env->eflags & VM_MASK) {
906 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
908 new_stack = 0;
909 esp = env->regs[R_ESP];
910 dpl = cpl;
911 } else {
912 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
913 new_stack = 0; /* avoid warning */
914 esp = 0; /* avoid warning */
916 esp &= ~0xfLL; /* align stack */
918 PUSHQ(esp, env->segs[R_SS].selector);
919 PUSHQ(esp, env->regs[R_ESP]);
920 PUSHQ(esp, cpu_compute_eflags(env));
921 PUSHQ(esp, env->segs[R_CS].selector);
922 PUSHQ(esp, old_eip);
923 if (has_error_code) {
924 PUSHQ(esp, error_code);
927 /* an interrupt gate clears the IF mask */
928 if ((type & 1) == 0) {
929 env->eflags &= ~IF_MASK;
931 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
933 if (new_stack) {
934 ss = 0 | dpl;
935 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
937 env->regs[R_ESP] = esp;
939 selector = (selector & ~3) | dpl;
940 cpu_x86_load_seg_cache(env, R_CS, selector,
941 get_seg_base(e1, e2),
942 get_seg_limit(e1, e2),
943 e2);
944 env->eip = offset;
946 #endif
948 #ifdef TARGET_X86_64
949 #if defined(CONFIG_USER_ONLY)
950 void helper_syscall(CPUX86State *env, int next_eip_addend)
952 CPUState *cs = CPU(x86_env_get_cpu(env));
954 cs->exception_index = EXCP_SYSCALL;
955 env->exception_next_eip = env->eip + next_eip_addend;
956 cpu_loop_exit(cs);
958 #else
959 void helper_syscall(CPUX86State *env, int next_eip_addend)
961 int selector;
963 if (!(env->efer & MSR_EFER_SCE)) {
964 raise_exception_err(env, EXCP06_ILLOP, 0);
966 selector = (env->star >> 32) & 0xffff;
967 if (env->hflags & HF_LMA_MASK) {
968 int code64;
970 env->regs[R_ECX] = env->eip + next_eip_addend;
971 env->regs[11] = cpu_compute_eflags(env);
973 code64 = env->hflags & HF_CS64_MASK;
975 env->eflags &= ~env->fmask;
976 cpu_load_eflags(env, env->eflags, 0);
977 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
978 0, 0xffffffff,
979 DESC_G_MASK | DESC_P_MASK |
980 DESC_S_MASK |
981 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
982 DESC_L_MASK);
983 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
984 0, 0xffffffff,
985 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
986 DESC_S_MASK |
987 DESC_W_MASK | DESC_A_MASK);
988 if (code64) {
989 env->eip = env->lstar;
990 } else {
991 env->eip = env->cstar;
993 } else {
994 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
996 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
997 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
998 0, 0xffffffff,
999 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1000 DESC_S_MASK |
1001 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1002 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1003 0, 0xffffffff,
1004 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1005 DESC_S_MASK |
1006 DESC_W_MASK | DESC_A_MASK);
1007 env->eip = (uint32_t)env->star;
1010 #endif
1011 #endif
1013 #ifdef TARGET_X86_64
1014 void helper_sysret(CPUX86State *env, int dflag)
1016 int cpl, selector;
1018 if (!(env->efer & MSR_EFER_SCE)) {
1019 raise_exception_err(env, EXCP06_ILLOP, 0);
1021 cpl = env->hflags & HF_CPL_MASK;
1022 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1023 raise_exception_err(env, EXCP0D_GPF, 0);
1025 selector = (env->star >> 48) & 0xffff;
1026 if (env->hflags & HF_LMA_MASK) {
1027 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1028 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1029 NT_MASK);
1030 if (dflag == 2) {
1031 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1032 0, 0xffffffff,
1033 DESC_G_MASK | DESC_P_MASK |
1034 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1035 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1036 DESC_L_MASK);
1037 env->eip = env->regs[R_ECX];
1038 } else {
1039 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1040 0, 0xffffffff,
1041 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1042 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1043 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1044 env->eip = (uint32_t)env->regs[R_ECX];
1046 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1047 0, 0xffffffff,
1048 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1049 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1050 DESC_W_MASK | DESC_A_MASK);
1051 } else {
1052 env->eflags |= IF_MASK;
1053 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1054 0, 0xffffffff,
1055 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1056 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1057 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1058 env->eip = (uint32_t)env->regs[R_ECX];
1059 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
1060 0, 0xffffffff,
1061 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1062 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1063 DESC_W_MASK | DESC_A_MASK);
1066 #endif
1068 /* real mode interrupt */
1069 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1070 int error_code, unsigned int next_eip)
1072 SegmentCache *dt;
1073 target_ulong ptr, ssp;
1074 int selector;
1075 uint32_t offset, esp;
1076 uint32_t old_cs, old_eip;
1078 /* real mode (simpler!) */
1079 dt = &env->idt;
1080 if (intno * 4 + 3 > dt->limit) {
1081 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1083 ptr = dt->base + intno * 4;
1084 offset = cpu_lduw_kernel(env, ptr);
1085 selector = cpu_lduw_kernel(env, ptr + 2);
1086 esp = env->regs[R_ESP];
1087 ssp = env->segs[R_SS].base;
1088 if (is_int) {
1089 old_eip = next_eip;
1090 } else {
1091 old_eip = env->eip;
1093 old_cs = env->segs[R_CS].selector;
1094 /* XXX: use SS segment size? */
1095 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1096 PUSHW(ssp, esp, 0xffff, old_cs);
1097 PUSHW(ssp, esp, 0xffff, old_eip);
1099 /* update processor state */
1100 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1101 env->eip = offset;
1102 env->segs[R_CS].selector = selector;
1103 env->segs[R_CS].base = (selector << 4);
1104 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1107 #if defined(CONFIG_USER_ONLY)
1108 /* fake user mode interrupt */
1109 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1110 int error_code, target_ulong next_eip)
1112 SegmentCache *dt;
1113 target_ulong ptr;
1114 int dpl, cpl, shift;
1115 uint32_t e2;
1117 dt = &env->idt;
1118 if (env->hflags & HF_LMA_MASK) {
1119 shift = 4;
1120 } else {
1121 shift = 3;
1123 ptr = dt->base + (intno << shift);
1124 e2 = cpu_ldl_kernel(env, ptr + 4);
1126 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1127 cpl = env->hflags & HF_CPL_MASK;
1128 /* check privilege if software int */
1129 if (is_int && dpl < cpl) {
1130 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1133 /* Since we emulate only user space, we cannot do more than
1134 exiting the emulation with the suitable exception and error
1135 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1136 if (is_int || intno == EXCP_SYSCALL) {
1137 env->eip = next_eip;
1141 #else
1143 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1144 int error_code, int is_hw, int rm)
1146 CPUState *cs = CPU(x86_env_get_cpu(env));
1147 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1148 control.event_inj));
1150 if (!(event_inj & SVM_EVTINJ_VALID)) {
1151 int type;
1153 if (is_int) {
1154 type = SVM_EVTINJ_TYPE_SOFT;
1155 } else {
1156 type = SVM_EVTINJ_TYPE_EXEPT;
1158 event_inj = intno | type | SVM_EVTINJ_VALID;
1159 if (!rm && exception_has_error_code(intno)) {
1160 event_inj |= SVM_EVTINJ_VALID_ERR;
1161 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
1162 control.event_inj_err),
1163 error_code);
1165 x86_stl_phys(cs,
1166 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1167 event_inj);
1170 #endif
1173 * Begin execution of an interrupt. is_int is TRUE if coming from
1174 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1175 * instruction. It is only relevant if is_int is TRUE.
1177 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1178 int error_code, target_ulong next_eip, int is_hw)
1180 CPUX86State *env = &cpu->env;
1182 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1183 if ((env->cr[0] & CR0_PE_MASK)) {
1184 static int count;
1186 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1187 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1188 count, intno, error_code, is_int,
1189 env->hflags & HF_CPL_MASK,
1190 env->segs[R_CS].selector, env->eip,
1191 (int)env->segs[R_CS].base + env->eip,
1192 env->segs[R_SS].selector, env->regs[R_ESP]);
1193 if (intno == 0x0e) {
1194 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1195 } else {
1196 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1198 qemu_log("\n");
1199 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1200 #if 0
1202 int i;
1203 target_ulong ptr;
1205 qemu_log(" code=");
1206 ptr = env->segs[R_CS].base + env->eip;
1207 for (i = 0; i < 16; i++) {
1208 qemu_log(" %02x", ldub(ptr + i));
1210 qemu_log("\n");
1212 #endif
1213 count++;
1216 if (env->cr[0] & CR0_PE_MASK) {
1217 #if !defined(CONFIG_USER_ONLY)
1218 if (env->hflags & HF_SVMI_MASK) {
1219 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1221 #endif
1222 #ifdef TARGET_X86_64
1223 if (env->hflags & HF_LMA_MASK) {
1224 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1225 } else
1226 #endif
1228 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1229 is_hw);
1231 } else {
1232 #if !defined(CONFIG_USER_ONLY)
1233 if (env->hflags & HF_SVMI_MASK) {
1234 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1236 #endif
1237 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1240 #if !defined(CONFIG_USER_ONLY)
1241 if (env->hflags & HF_SVMI_MASK) {
1242 CPUState *cs = CPU(cpu);
1243 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
1244 offsetof(struct vmcb,
1245 control.event_inj));
1247 x86_stl_phys(cs,
1248 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1249 event_inj & ~SVM_EVTINJ_VALID);
1251 #endif
1254 void x86_cpu_do_interrupt(CPUState *cs)
1256 X86CPU *cpu = X86_CPU(cs);
1257 CPUX86State *env = &cpu->env;
1259 #if defined(CONFIG_USER_ONLY)
1260 /* if user mode only, we simulate a fake exception
1261 which will be handled outside the cpu execution
1262 loop */
1263 do_interrupt_user(env, cs->exception_index,
1264 env->exception_is_int,
1265 env->error_code,
1266 env->exception_next_eip);
1267 /* successfully delivered */
1268 env->old_exception = -1;
1269 #else
1270 /* simulate a real cpu exception. On i386, it can
1271 trigger new exceptions, but we do not handle
1272 double or triple faults yet. */
1273 do_interrupt_all(cpu, cs->exception_index,
1274 env->exception_is_int,
1275 env->error_code,
1276 env->exception_next_eip, 0);
1277 /* successfully delivered */
1278 env->old_exception = -1;
1279 #endif
1282 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1284 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1287 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1289 X86CPU *cpu = X86_CPU(cs);
1290 CPUX86State *env = &cpu->env;
1291 bool ret = false;
1293 #if !defined(CONFIG_USER_ONLY)
1294 if (interrupt_request & CPU_INTERRUPT_POLL) {
1295 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1296 apic_poll_irq(cpu->apic_state);
1298 #endif
1299 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1300 do_cpu_sipi(cpu);
1301 } else if (env->hflags2 & HF2_GIF_MASK) {
1302 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1303 !(env->hflags & HF_SMM_MASK)) {
1304 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1305 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1306 do_smm_enter(cpu);
1307 ret = true;
1308 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1309 !(env->hflags2 & HF2_NMI_MASK)) {
1310 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1311 env->hflags2 |= HF2_NMI_MASK;
1312 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1313 ret = true;
1314 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1315 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1316 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1317 ret = true;
1318 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1319 (((env->hflags2 & HF2_VINTR_MASK) &&
1320 (env->hflags2 & HF2_HIF_MASK)) ||
1321 (!(env->hflags2 & HF2_VINTR_MASK) &&
1322 (env->eflags & IF_MASK &&
1323 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1324 int intno;
1325 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1326 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1327 CPU_INTERRUPT_VIRQ);
1328 intno = cpu_get_pic_interrupt(env);
1329 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1330 "Servicing hardware INT=0x%02x\n", intno);
1331 do_interrupt_x86_hardirq(env, intno, 1);
1332 /* ensure that no TB jump will be modified as
1333 the program flow was changed */
1334 ret = true;
1335 #if !defined(CONFIG_USER_ONLY)
1336 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1337 (env->eflags & IF_MASK) &&
1338 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1339 int intno;
1340 /* FIXME: this should respect TPR */
1341 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1342 intno = x86_ldl_phys(cs, env->vm_vmcb
1343 + offsetof(struct vmcb, control.int_vector));
1344 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1345 "Servicing virtual hardware INT=0x%02x\n", intno);
1346 do_interrupt_x86_hardirq(env, intno, 1);
1347 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1348 ret = true;
1349 #endif
1353 return ret;
1356 void helper_enter_level(CPUX86State *env, int level, int data32,
1357 target_ulong t1)
1359 target_ulong ssp;
1360 uint32_t esp_mask, esp, ebp;
1362 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1363 ssp = env->segs[R_SS].base;
1364 ebp = env->regs[R_EBP];
1365 esp = env->regs[R_ESP];
1366 if (data32) {
1367 /* 32 bit */
1368 esp -= 4;
1369 while (--level) {
1370 esp -= 4;
1371 ebp -= 4;
1372 cpu_stl_data(env, ssp + (esp & esp_mask),
1373 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
1375 esp -= 4;
1376 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
1377 } else {
1378 /* 16 bit */
1379 esp -= 2;
1380 while (--level) {
1381 esp -= 2;
1382 ebp -= 2;
1383 cpu_stw_data(env, ssp + (esp & esp_mask),
1384 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
1386 esp -= 2;
1387 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
1391 #ifdef TARGET_X86_64
1392 void helper_enter64_level(CPUX86State *env, int level, int data64,
1393 target_ulong t1)
1395 target_ulong esp, ebp;
1397 ebp = env->regs[R_EBP];
1398 esp = env->regs[R_ESP];
1400 if (data64) {
1401 /* 64 bit */
1402 esp -= 8;
1403 while (--level) {
1404 esp -= 8;
1405 ebp -= 8;
1406 cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
1408 esp -= 8;
1409 cpu_stq_data(env, esp, t1);
1410 } else {
1411 /* 16 bit */
1412 esp -= 2;
1413 while (--level) {
1414 esp -= 2;
1415 ebp -= 2;
1416 cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
1418 esp -= 2;
1419 cpu_stw_data(env, esp, t1);
1422 #endif
1424 void helper_lldt(CPUX86State *env, int selector)
1426 SegmentCache *dt;
1427 uint32_t e1, e2;
1428 int index, entry_limit;
1429 target_ulong ptr;
1431 selector &= 0xffff;
1432 if ((selector & 0xfffc) == 0) {
1433 /* XXX: NULL selector case: invalid LDT */
1434 env->ldt.base = 0;
1435 env->ldt.limit = 0;
1436 } else {
1437 if (selector & 0x4) {
1438 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1440 dt = &env->gdt;
1441 index = selector & ~7;
1442 #ifdef TARGET_X86_64
1443 if (env->hflags & HF_LMA_MASK) {
1444 entry_limit = 15;
1445 } else
1446 #endif
1448 entry_limit = 7;
1450 if ((index + entry_limit) > dt->limit) {
1451 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1453 ptr = dt->base + index;
1454 e1 = cpu_ldl_kernel(env, ptr);
1455 e2 = cpu_ldl_kernel(env, ptr + 4);
1456 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1457 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1459 if (!(e2 & DESC_P_MASK)) {
1460 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1462 #ifdef TARGET_X86_64
1463 if (env->hflags & HF_LMA_MASK) {
1464 uint32_t e3;
1466 e3 = cpu_ldl_kernel(env, ptr + 8);
1467 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1468 env->ldt.base |= (target_ulong)e3 << 32;
1469 } else
1470 #endif
1472 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1475 env->ldt.selector = selector;
1478 void helper_ltr(CPUX86State *env, int selector)
1480 SegmentCache *dt;
1481 uint32_t e1, e2;
1482 int index, type, entry_limit;
1483 target_ulong ptr;
1485 selector &= 0xffff;
1486 if ((selector & 0xfffc) == 0) {
1487 /* NULL selector case: invalid TR */
1488 env->tr.base = 0;
1489 env->tr.limit = 0;
1490 env->tr.flags = 0;
1491 } else {
1492 if (selector & 0x4) {
1493 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1495 dt = &env->gdt;
1496 index = selector & ~7;
1497 #ifdef TARGET_X86_64
1498 if (env->hflags & HF_LMA_MASK) {
1499 entry_limit = 15;
1500 } else
1501 #endif
1503 entry_limit = 7;
1505 if ((index + entry_limit) > dt->limit) {
1506 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1508 ptr = dt->base + index;
1509 e1 = cpu_ldl_kernel(env, ptr);
1510 e2 = cpu_ldl_kernel(env, ptr + 4);
1511 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1512 if ((e2 & DESC_S_MASK) ||
1513 (type != 1 && type != 9)) {
1514 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1516 if (!(e2 & DESC_P_MASK)) {
1517 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1519 #ifdef TARGET_X86_64
1520 if (env->hflags & HF_LMA_MASK) {
1521 uint32_t e3, e4;
1523 e3 = cpu_ldl_kernel(env, ptr + 8);
1524 e4 = cpu_ldl_kernel(env, ptr + 12);
1525 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1526 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1528 load_seg_cache_raw_dt(&env->tr, e1, e2);
1529 env->tr.base |= (target_ulong)e3 << 32;
1530 } else
1531 #endif
1533 load_seg_cache_raw_dt(&env->tr, e1, e2);
1535 e2 |= DESC_TSS_BUSY_MASK;
1536 cpu_stl_kernel(env, ptr + 4, e2);
1538 env->tr.selector = selector;
1541 /* only works in protected mode and not VM86. seg_reg must be != R_CS */
1542 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1544 uint32_t e1, e2;
1545 int cpl, dpl, rpl;
1546 SegmentCache *dt;
1547 int index;
1548 target_ulong ptr;
1550 selector &= 0xffff;
1551 cpl = env->hflags & HF_CPL_MASK;
1552 if ((selector & 0xfffc) == 0) {
1553 /* null selector case */
1554 if (seg_reg == R_SS
1555 #ifdef TARGET_X86_64
1556 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1557 #endif
1559 raise_exception_err(env, EXCP0D_GPF, 0);
1561 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1562 } else {
1564 if (selector & 0x4) {
1565 dt = &env->ldt;
1566 } else {
1567 dt = &env->gdt;
1569 index = selector & ~7;
1570 if ((index + 7) > dt->limit) {
1571 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1573 ptr = dt->base + index;
1574 e1 = cpu_ldl_kernel(env, ptr);
1575 e2 = cpu_ldl_kernel(env, ptr + 4);
1577 if (!(e2 & DESC_S_MASK)) {
1578 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1580 rpl = selector & 3;
1581 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1582 if (seg_reg == R_SS) {
1583 /* must be writable segment */
1584 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1585 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1587 if (rpl != cpl || dpl != cpl) {
1588 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1590 } else {
1591 /* must be readable segment */
1592 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1593 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1596 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1597 /* if not conforming code, test rights */
1598 if (dpl < cpl || dpl < rpl) {
1599 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1604 if (!(e2 & DESC_P_MASK)) {
1605 if (seg_reg == R_SS) {
1606 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1607 } else {
1608 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1612 /* set the access bit if not already set */
1613 if (!(e2 & DESC_A_MASK)) {
1614 e2 |= DESC_A_MASK;
1615 cpu_stl_kernel(env, ptr + 4, e2);
1618 cpu_x86_load_seg_cache(env, seg_reg, selector,
1619 get_seg_base(e1, e2),
1620 get_seg_limit(e1, e2),
1621 e2);
1622 #if 0
1623 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1624 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1625 #endif
1629 /* protected mode jump */
1630 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1631 int next_eip_addend)
1633 int gate_cs, type;
1634 uint32_t e1, e2, cpl, dpl, rpl, limit;
1635 target_ulong next_eip;
1637 if ((new_cs & 0xfffc) == 0) {
1638 raise_exception_err(env, EXCP0D_GPF, 0);
1640 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1641 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1643 cpl = env->hflags & HF_CPL_MASK;
1644 if (e2 & DESC_S_MASK) {
1645 if (!(e2 & DESC_CS_MASK)) {
1646 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1648 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1649 if (e2 & DESC_C_MASK) {
1650 /* conforming code segment */
1651 if (dpl > cpl) {
1652 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1654 } else {
1655 /* non conforming code segment */
1656 rpl = new_cs & 3;
1657 if (rpl > cpl) {
1658 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1660 if (dpl != cpl) {
1661 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1664 if (!(e2 & DESC_P_MASK)) {
1665 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1667 limit = get_seg_limit(e1, e2);
1668 if (new_eip > limit &&
1669 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1670 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1672 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1673 get_seg_base(e1, e2), limit, e2);
1674 env->eip = new_eip;
1675 } else {
1676 /* jump to call or task gate */
1677 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1678 rpl = new_cs & 3;
1679 cpl = env->hflags & HF_CPL_MASK;
1680 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1681 switch (type) {
1682 case 1: /* 286 TSS */
1683 case 9: /* 386 TSS */
1684 case 5: /* task gate */
1685 if (dpl < cpl || dpl < rpl) {
1686 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1688 next_eip = env->eip + next_eip_addend;
1689 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1690 break;
1691 case 4: /* 286 call gate */
1692 case 12: /* 386 call gate */
1693 if ((dpl < cpl) || (dpl < rpl)) {
1694 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1696 if (!(e2 & DESC_P_MASK)) {
1697 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1699 gate_cs = e1 >> 16;
1700 new_eip = (e1 & 0xffff);
1701 if (type == 12) {
1702 new_eip |= (e2 & 0xffff0000);
1704 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
1705 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1707 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1708 /* must be code segment */
1709 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1710 (DESC_S_MASK | DESC_CS_MASK))) {
1711 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1713 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1714 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1715 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1717 if (!(e2 & DESC_P_MASK)) {
1718 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1720 limit = get_seg_limit(e1, e2);
1721 if (new_eip > limit) {
1722 raise_exception_err(env, EXCP0D_GPF, 0);
1724 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1725 get_seg_base(e1, e2), limit, e2);
1726 env->eip = new_eip;
1727 break;
1728 default:
1729 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1730 break;
1735 /* real mode call */
1736 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1737 int shift, int next_eip)
1739 int new_eip;
1740 uint32_t esp, esp_mask;
1741 target_ulong ssp;
1743 new_eip = new_eip1;
1744 esp = env->regs[R_ESP];
1745 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1746 ssp = env->segs[R_SS].base;
1747 if (shift) {
1748 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1749 PUSHL(ssp, esp, esp_mask, next_eip);
1750 } else {
1751 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1752 PUSHW(ssp, esp, esp_mask, next_eip);
1755 SET_ESP(esp, esp_mask);
1756 env->eip = new_eip;
1757 env->segs[R_CS].selector = new_cs;
1758 env->segs[R_CS].base = (new_cs << 4);
1761 /* protected mode call */
1762 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1763 int shift, int next_eip_addend)
1765 int new_stack, i;
1766 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1767 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1768 uint32_t val, limit, old_sp_mask;
1769 target_ulong ssp, old_ssp, next_eip;
1771 next_eip = env->eip + next_eip_addend;
1772 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1773 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1774 if ((new_cs & 0xfffc) == 0) {
1775 raise_exception_err(env, EXCP0D_GPF, 0);
1777 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1778 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1780 cpl = env->hflags & HF_CPL_MASK;
1781 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1782 if (e2 & DESC_S_MASK) {
1783 if (!(e2 & DESC_CS_MASK)) {
1784 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1786 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1787 if (e2 & DESC_C_MASK) {
1788 /* conforming code segment */
1789 if (dpl > cpl) {
1790 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1792 } else {
1793 /* non conforming code segment */
1794 rpl = new_cs & 3;
1795 if (rpl > cpl) {
1796 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1798 if (dpl != cpl) {
1799 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1802 if (!(e2 & DESC_P_MASK)) {
1803 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1806 #ifdef TARGET_X86_64
1807 /* XXX: check 16/32 bit cases in long mode */
1808 if (shift == 2) {
1809 target_ulong rsp;
1811 /* 64 bit case */
1812 rsp = env->regs[R_ESP];
1813 PUSHQ(rsp, env->segs[R_CS].selector);
1814 PUSHQ(rsp, next_eip);
1815 /* from this point, not restartable */
1816 env->regs[R_ESP] = rsp;
1817 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1818 get_seg_base(e1, e2),
1819 get_seg_limit(e1, e2), e2);
1820 env->eip = new_eip;
1821 } else
1822 #endif
1824 sp = env->regs[R_ESP];
1825 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1826 ssp = env->segs[R_SS].base;
1827 if (shift) {
1828 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1829 PUSHL(ssp, sp, sp_mask, next_eip);
1830 } else {
1831 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1832 PUSHW(ssp, sp, sp_mask, next_eip);
1835 limit = get_seg_limit(e1, e2);
1836 if (new_eip > limit) {
1837 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1839 /* from this point, not restartable */
1840 SET_ESP(sp, sp_mask);
1841 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1842 get_seg_base(e1, e2), limit, e2);
1843 env->eip = new_eip;
1845 } else {
1846 /* check gate type */
1847 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1848 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1849 rpl = new_cs & 3;
1850 switch (type) {
1851 case 1: /* available 286 TSS */
1852 case 9: /* available 386 TSS */
1853 case 5: /* task gate */
1854 if (dpl < cpl || dpl < rpl) {
1855 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1857 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1858 return;
1859 case 4: /* 286 call gate */
1860 case 12: /* 386 call gate */
1861 break;
1862 default:
1863 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1864 break;
1866 shift = type >> 3;
1868 if (dpl < cpl || dpl < rpl) {
1869 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1871 /* check valid bit */
1872 if (!(e2 & DESC_P_MASK)) {
1873 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1875 selector = e1 >> 16;
1876 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1877 param_count = e2 & 0x1f;
1878 if ((selector & 0xfffc) == 0) {
1879 raise_exception_err(env, EXCP0D_GPF, 0);
1882 if (load_segment(env, &e1, &e2, selector) != 0) {
1883 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1885 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1886 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1889 if (dpl > cpl) {
1890 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1892 if (!(e2 & DESC_P_MASK)) {
1893 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}

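/*
 * In real and vm86 mode, IRET simply pops IP, CS and FLAGS (16 or 32 bits
 * wide depending on the operand size) with no privilege checks; note below
 * that IOPL is only writable when not in vm86 mode.
 */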
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

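/*
 * Helper for returns to an outer privilege level: data segment registers
 * that are no longer accessible at the new (numerically larger) CPL are
 * nulled.  Conforming code segments are left alone, matching the checks
 * below.
 */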
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

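/*
 * Common code for LRET and IRET in protected mode.  The stack image that is
 * consumed is, from the top: EIP, CS, then EFLAGS for IRET only, and, when
 * returning to an outer privilege level, ESP and SS as well.  For "lret $imm"
 * the 'addend' argument skips that many extra bytes of parameters.
 */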
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

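    /*
     * IRET back to vm86: the 32-bit frame built when the CPU left vm86 mode
     * also holds ESP, SS, ES, DS, FS and GS, which are reloaded below as
     * vm86 (real-mode style) segments.
     */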
 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

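/*
 * IRET with EFLAGS.NT set does not return through the stack at all: it is a
 * task return through the back-link selector stored at offset 0 of the
 * current TSS, handled by switch_tss() below.  NT is not valid in long mode.
 */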
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}

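/*
 * SYSENTER/SYSEXIT use flat segments derived from the IA32_SYSENTER_CS MSR;
 * only the selector values differ:
 *
 *   SYSENTER:  CS = MSR,      SS = MSR + 8
 *   SYSEXIT:   CS = MSR + 16, SS = MSR + 24   (32-bit target)
 *              CS = MSR + 32, SS = MSR + 40   (64-bit target)
 *
 * ESP and EIP come from IA32_SYSENTER_ESP/EIP on entry, and from ECX/EDX on
 * exit, as implemented below.
 */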
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

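/*
 * LSL, LAR, VERR and VERW report their result through ZF rather than a
 * fault: on success ZF is set (here by updating CC_SRC) and LSL/LAR return
 * the segment limit/access rights, otherwise ZF is cleared and 0 is
 * returned.
 */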
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}

void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif

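/*
 * I/O permission bitmap check, used when CPL > IOPL or in vm86 mode.  The
 * 16-bit word at offset 0x66 of the TSS gives the bitmap base; there is one
 * bit per port, and every bit covering the access must be clear.  As an
 * illustration (not taken from a real guest): an "outw" to port 0x3F9 reads
 * the 16-bit word at bitmap_base + (0x3F9 >> 3), shifts it right by
 * (0x3F9 & 7) = 1 and requires the low two bits to be zero.
 */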
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}