target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
4 *
5 * Copyright (c) 2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
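/* The repeated includes below instantiate the cpu_ldst_template accessors
 * (cpu_ldub/lduw/ldl/ldq_kernel and the matching cpu_stb/stw/stl/stq_kernel
 * functions) once per DATA_SIZE, all bound to the MMU index used for
 * implicit supervisor accesses.  These are the helpers used throughout this
 * file to read and write descriptor tables and TSS contents. */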
37 #ifndef CONFIG_USER_ONLY
38 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
39 #define MEMSUFFIX _kernel
40 #define DATA_SIZE 1
41 #include "exec/cpu_ldst_template.h"
43 #define DATA_SIZE 2
44 #include "exec/cpu_ldst_template.h"
46 #define DATA_SIZE 4
47 #include "exec/cpu_ldst_template.h"
49 #define DATA_SIZE 8
50 #include "exec/cpu_ldst_template.h"
51 #undef CPU_MMU_INDEX
52 #undef MEMSUFFIX
53 #endif
55 /* return non zero if error */
56 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
57 uint32_t *e2_ptr, int selector)
59 SegmentCache *dt;
60 int index;
61 target_ulong ptr;
63 if (selector & 0x4) {
64 dt = &env->ldt;
65 } else {
66 dt = &env->gdt;
68 index = selector & ~7;
69 if ((index + 7) > dt->limit) {
70 return -1;
72 ptr = dt->base + index;
73 *e1_ptr = cpu_ldl_kernel(env, ptr);
74 *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
75 return 0;
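/* A segment descriptor is two 32-bit words: e1 holds limit[15:0] and
 * base[15:0]; e2 holds base[23:16], the type/S/DPL/P attribute bits,
 * limit[19:16], the AVL/L/D-B/G flags and base[31:24].  With the
 * granularity bit set the 20-bit limit is in 4K units, e.g. a raw limit
 * of 0xfffff expands to (0xfffff << 12) | 0xfff = 0xffffffff. */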
78 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
80 unsigned int limit;
82 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
83 if (e2 & DESC_G_MASK) {
84 limit = (limit << 12) | 0xfff;
86 return limit;
89 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
91 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
94 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
95 uint32_t e2)
97 sc->base = get_seg_base(e1, e2);
98 sc->limit = get_seg_limit(e1, e2);
99 sc->flags = e2;
102 /* init the segment cache in vm86 mode. */
103 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
105 selector &= 0xffff;
107 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
108 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
109 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
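/* get_ss_esp_from_tss() fetches the inner-level stack pointer from the
 * current TSS.  A 32-bit TSS keeps its ESPn:SSn pairs at offset 4 + 8 * n
 * (ESP first, then the SS selector); a 16-bit TSS keeps SPn:SSn at
 * 2 + 4 * n, which is what the (dpl * 4 + 2) << shift computation yields. */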
112 static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
113 uint32_t *esp_ptr, int dpl)
115 X86CPU *cpu = x86_env_get_cpu(env);
116 int type, index, shift;
118 #if 0
120 int i;
121 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
122 for (i = 0; i < env->tr.limit; i++) {
123 printf("%02x ", env->tr.base[i]);
124 if ((i & 7) == 7) {
125 printf("\n");
128 printf("\n");
130 #endif
132 if (!(env->tr.flags & DESC_P_MASK)) {
133 cpu_abort(CPU(cpu), "invalid tss");
135 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
136 if ((type & 7) != 1) {
137 cpu_abort(CPU(cpu), "invalid tss type");
139 shift = type >> 3;
140 index = (dpl * 4 + 2) << shift;
141 if (index + (4 << shift) - 1 > env->tr.limit) {
142 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
144 if (shift == 0) {
145 *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
146 *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
147 } else {
148 *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
149 *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
153 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
155 uint32_t e1, e2;
156 int rpl, dpl;
158 if ((selector & 0xfffc) != 0) {
159 if (load_segment(env, &e1, &e2, selector) != 0) {
160 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
162 if (!(e2 & DESC_S_MASK)) {
163 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
165 rpl = selector & 3;
166 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
167 if (seg_reg == R_CS) {
168 if (!(e2 & DESC_CS_MASK)) {
169 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
171 if (dpl != rpl) {
172 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
174 } else if (seg_reg == R_SS) {
175 /* SS must be writable data */
176 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
177 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
179 if (dpl != cpl || dpl != rpl) {
180 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
182 } else {
183 /* not readable code */
184 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
185 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
187 /* if data or non-conforming code, check the rights */
188 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
189 if (dpl < cpl || dpl < rpl) {
190 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
194 if (!(e2 & DESC_P_MASK)) {
195 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
197 cpu_x86_load_seg_cache(env, seg_reg, selector,
198 get_seg_base(e1, e2),
199 get_seg_limit(e1, e2),
200 e2);
201 } else {
202 if (seg_reg == R_SS || seg_reg == R_CS) {
203 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
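/* Hardware task switching.  The source of the switch matters:
 * - JMP and IRET clear the busy bit of the outgoing TSS descriptor;
 * - JMP and CALL set the busy bit of the incoming TSS descriptor;
 * - CALL additionally stores a back link to the old TSS and sets NT,
 *   while IRET clears NT in the flags saved to the old TSS.
 * switch_tss() below implements this for both 16-bit and 32-bit TSS. */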
208 #define SWITCH_TSS_JMP 0
209 #define SWITCH_TSS_IRET 1
210 #define SWITCH_TSS_CALL 2
212 /* XXX: restore CPU state in registers (PowerPC case) */
213 static void switch_tss(CPUX86State *env, int tss_selector,
214 uint32_t e1, uint32_t e2, int source,
215 uint32_t next_eip)
217 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
218 target_ulong tss_base;
219 uint32_t new_regs[8], new_segs[6];
220 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
221 uint32_t old_eflags, eflags_mask;
222 SegmentCache *dt;
223 int index;
224 target_ulong ptr;
226 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
227 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
228 source);
230 /* if task gate, we read the TSS segment and we load it */
231 if (type == 5) {
232 if (!(e2 & DESC_P_MASK)) {
233 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
235 tss_selector = e1 >> 16;
236 if (tss_selector & 4) {
237 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
239 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
240 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
242 if (e2 & DESC_S_MASK) {
243 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
245 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
246 if ((type & 7) != 1) {
247 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
251 if (!(e2 & DESC_P_MASK)) {
252 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
255 if (type & 8) {
256 tss_limit_max = 103;
257 } else {
258 tss_limit_max = 43;
260 tss_limit = get_seg_limit(e1, e2);
261 tss_base = get_seg_base(e1, e2);
262 if ((tss_selector & 4) != 0 ||
263 tss_limit < tss_limit_max) {
264 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
266 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
267 if (old_type & 8) {
268 old_tss_limit_max = 103;
269 } else {
270 old_tss_limit_max = 43;
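/* Layout of the fields accessed below: a 32-bit TSS holds CR3 at 0x1c,
 * EIP at 0x20, EFLAGS at 0x24, the eight GPRs at 0x28, the six segment
 * selectors at 0x48 (one 32-bit slot each), the LDT selector at 0x60 and
 * the T bit / I/O map base at 0x64.  A 16-bit (286) TSS holds IP at 0x0e,
 * FLAGS at 0x10, the GPRs at 0x12 (two bytes each), the four segment
 * selectors at 0x22 and the LDT selector at 0x2a. */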
273 /* read all the registers from the new TSS */
274 if (type & 8) {
275 /* 32 bit */
276 new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
277 new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
278 new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
279 for (i = 0; i < 8; i++) {
280 new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
282 for (i = 0; i < 6; i++) {
283 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
285 new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
286 new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
287 } else {
288 /* 16 bit */
289 new_cr3 = 0;
290 new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
291 new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
292 for (i = 0; i < 8; i++) {
293 new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
294 0xffff0000;
296 for (i = 0; i < 4; i++) {
297 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 2));
299 new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
300 new_segs[R_FS] = 0;
301 new_segs[R_GS] = 0;
302 new_trap = 0;
304 /* XXX: avoid a compiler warning, see
305 http://support.amd.com/us/Processor_TechDocs/24593.pdf
306 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
307 (void)new_trap;
309 /* NOTE: we must avoid memory exceptions during the task switch,
310 so we make dummy accesses before */
311 /* XXX: it can still fail in some cases, so a bigger hack is
312 necessary to validate the TLB after the accesses have been done */
314 v1 = cpu_ldub_kernel(env, env->tr.base);
315 v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
316 cpu_stb_kernel(env, env->tr.base, v1);
317 cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);
319 /* clear busy bit (it is restartable) */
320 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
321 target_ulong ptr;
322 uint32_t e2;
324 ptr = env->gdt.base + (env->tr.selector & ~7);
325 e2 = cpu_ldl_kernel(env, ptr + 4);
326 e2 &= ~DESC_TSS_BUSY_MASK;
327 cpu_stl_kernel(env, ptr + 4, e2);
329 old_eflags = cpu_compute_eflags(env);
330 if (source == SWITCH_TSS_IRET) {
331 old_eflags &= ~NT_MASK;
334 /* save the current state in the old TSS */
335 if (type & 8) {
336 /* 32 bit */
337 cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
338 cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
339 cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
340 cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
341 cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
342 cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
343 cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
344 cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
345 cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
346 cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
347 for (i = 0; i < 6; i++) {
348 cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
349 env->segs[i].selector);
351 } else {
352 /* 16 bit */
353 cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
354 cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
355 cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
356 cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
357 cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
358 cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
359 cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
360 cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
361 cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
362 cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
363 for (i = 0; i < 4; i++) {
364 cpu_stw_kernel(env, env->tr.base + (0x22 + i * 2),
365 env->segs[i].selector);
369 /* now if an exception occurs, it will occur in the next task
370 context */
372 if (source == SWITCH_TSS_CALL) {
373 cpu_stw_kernel(env, tss_base, env->tr.selector);
374 new_eflags |= NT_MASK;
377 /* set busy bit */
378 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
379 target_ulong ptr;
380 uint32_t e2;
382 ptr = env->gdt.base + (tss_selector & ~7);
383 e2 = cpu_ldl_kernel(env, ptr + 4);
384 e2 |= DESC_TSS_BUSY_MASK;
385 cpu_stl_kernel(env, ptr + 4, e2);
388 /* set the new CPU state */
389 /* from this point, any exception which occurs can give problems */
390 env->cr[0] |= CR0_TS_MASK;
391 env->hflags |= HF_TS_MASK;
392 env->tr.selector = tss_selector;
393 env->tr.base = tss_base;
394 env->tr.limit = tss_limit;
395 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
397 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
398 cpu_x86_update_cr3(env, new_cr3);
401 /* load all registers without an exception, then reload them with
402 possible exception */
403 env->eip = new_eip;
404 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
405 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
406 if (!(type & 8)) {
407 eflags_mask &= 0xffff;
409 cpu_load_eflags(env, new_eflags, eflags_mask);
410 /* XXX: what to do in 16 bit case? */
411 env->regs[R_EAX] = new_regs[0];
412 env->regs[R_ECX] = new_regs[1];
413 env->regs[R_EDX] = new_regs[2];
414 env->regs[R_EBX] = new_regs[3];
415 env->regs[R_ESP] = new_regs[4];
416 env->regs[R_EBP] = new_regs[5];
417 env->regs[R_ESI] = new_regs[6];
418 env->regs[R_EDI] = new_regs[7];
419 if (new_eflags & VM_MASK) {
420 for (i = 0; i < 6; i++) {
421 load_seg_vm(env, i, new_segs[i]);
423 } else {
424 /* first just selectors as the rest may trigger exceptions */
425 for (i = 0; i < 6; i++) {
426 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
430 env->ldt.selector = new_ldt & ~4;
431 env->ldt.base = 0;
432 env->ldt.limit = 0;
433 env->ldt.flags = 0;
435 /* load the LDT */
436 if (new_ldt & 4) {
437 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
440 if ((new_ldt & 0xfffc) != 0) {
441 dt = &env->gdt;
442 index = new_ldt & ~7;
443 if ((index + 7) > dt->limit) {
444 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
446 ptr = dt->base + index;
447 e1 = cpu_ldl_kernel(env, ptr);
448 e2 = cpu_ldl_kernel(env, ptr + 4);
449 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
450 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
452 if (!(e2 & DESC_P_MASK)) {
453 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
455 load_seg_cache_raw_dt(&env->ldt, e1, e2);
458 /* load the segments */
459 if (!(new_eflags & VM_MASK)) {
460 int cpl = new_segs[R_CS] & 3;
461 tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
462 tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
463 tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
464 tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
465 tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
466 tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
469 /* check that env->eip is in the CS segment limits */
470 if (new_eip > env->segs[R_CS].limit) {
471 /* XXX: different exception if CALL? */
472 raise_exception_err(env, EXCP0D_GPF, 0);
475 #ifndef CONFIG_USER_ONLY
476 /* reset local breakpoints */
477 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
478 for (i = 0; i < DR7_MAX_BP; i++) {
479 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
480 !hw_global_breakpoint_enabled(env->dr[7], i)) {
481 hw_breakpoint_remove(env, i);
484 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
486 #endif
489 static inline unsigned int get_sp_mask(unsigned int e2)
491 if (e2 & DESC_B_MASK) {
492 return 0xffffffff;
493 } else {
494 return 0xffff;
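/* Exceptions that push an error code: #DF (8), #TS (10), #NP (11),
 * #SS (12), #GP (13), #PF (14) and #AC (17). */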
498 static int exception_has_error_code(int intno)
500 switch (intno) {
501 case 8:
502 case 10:
503 case 11:
504 case 12:
505 case 13:
506 case 14:
507 case 17:
508 return 1;
510 return 0;
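/* SET_ESP only replaces the bits of ESP selected by sp_mask: with a
 * 16-bit stack segment only SP changes and the upper bits of ESP are
 * preserved.  The TARGET_X86_64 variant special-cases the two common
 * masks; a full 32-bit update is written zero-extended into RSP, as on
 * real hardware. */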
513 #ifdef TARGET_X86_64
514 #define SET_ESP(val, sp_mask) \
515 do { \
516 if ((sp_mask) == 0xffff) { \
517 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
518 ((val) & 0xffff); \
519 } else if ((sp_mask) == 0xffffffffLL) { \
520 env->regs[R_ESP] = (uint32_t)(val); \
521 } else { \
522 env->regs[R_ESP] = (val); \
524 } while (0)
525 #else
526 #define SET_ESP(val, sp_mask) \
527 do { \
528 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
529 ((val) & (sp_mask)); \
530 } while (0)
531 #endif
533 /* in 64-bit machines, this can overflow. So this segment addition macro
534 * can be used to trim the value to 32-bit whenever needed */
535 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
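/* The PUSHW/PUSHL/POPW/POPL macros operate on local copies of the stack
 * pointer; the caller commits the final value with SET_ESP only once all
 * accesses have succeeded, so a fault in the middle of building an
 * exception or call frame leaves the architectural ESP untouched and the
 * faulting instruction restartable. */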
537 /* XXX: add a is_user flag to have proper security support */
538 #define PUSHW(ssp, sp, sp_mask, val) \
540 sp -= 2; \
541 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
544 #define PUSHL(ssp, sp, sp_mask, val) \
546 sp -= 4; \
547 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
550 #define POPW(ssp, sp, sp_mask, val) \
552 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
553 sp += 2; \
556 #define POPL(ssp, sp, sp_mask, val) \
558 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
559 sp += 4; \
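/* do_interrupt_protected() delivers an interrupt or exception through the
 * protected-mode IDT: the gate is fetched and type-checked, the gate DPL
 * is checked against CPL for software interrupts, and the target code
 * segment is loaded.  When the handler runs at a more privileged level, a
 * new SS:ESP is taken from the TSS and the old SS:ESP is pushed first; the
 * frame then contains EFLAGS, CS, EIP and, for some exceptions, an error
 * code.  Task gates instead divert the whole delivery to switch_tss(). */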
562 /* protected mode interrupt */
563 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
564 int error_code, unsigned int next_eip,
565 int is_hw)
567 SegmentCache *dt;
568 target_ulong ptr, ssp;
569 int type, dpl, selector, ss_dpl, cpl;
570 int has_error_code, new_stack, shift;
571 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
572 uint32_t old_eip, sp_mask;
573 int vm86 = env->eflags & VM_MASK;
575 has_error_code = 0;
576 if (!is_int && !is_hw) {
577 has_error_code = exception_has_error_code(intno);
579 if (is_int) {
580 old_eip = next_eip;
581 } else {
582 old_eip = env->eip;
585 dt = &env->idt;
586 if (intno * 8 + 7 > dt->limit) {
587 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
589 ptr = dt->base + intno * 8;
590 e1 = cpu_ldl_kernel(env, ptr);
591 e2 = cpu_ldl_kernel(env, ptr + 4);
592 /* check gate type */
593 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
594 switch (type) {
595 case 5: /* task gate */
596 /* must do that check here to return the correct error code */
597 if (!(e2 & DESC_P_MASK)) {
598 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
600 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
601 if (has_error_code) {
602 int type;
603 uint32_t mask;
605 /* push the error code */
606 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
607 shift = type >> 3;
608 if (env->segs[R_SS].flags & DESC_B_MASK) {
609 mask = 0xffffffff;
610 } else {
611 mask = 0xffff;
613 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
614 ssp = env->segs[R_SS].base + esp;
615 if (shift) {
616 cpu_stl_kernel(env, ssp, error_code);
617 } else {
618 cpu_stw_kernel(env, ssp, error_code);
620 SET_ESP(esp, mask);
622 return;
623 case 6: /* 286 interrupt gate */
624 case 7: /* 286 trap gate */
625 case 14: /* 386 interrupt gate */
626 case 15: /* 386 trap gate */
627 break;
628 default:
629 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
630 break;
632 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
633 cpl = env->hflags & HF_CPL_MASK;
634 /* check privilege if software int */
635 if (is_int && dpl < cpl) {
636 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
638 /* check valid bit */
639 if (!(e2 & DESC_P_MASK)) {
640 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
642 selector = e1 >> 16;
643 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
644 if ((selector & 0xfffc) == 0) {
645 raise_exception_err(env, EXCP0D_GPF, 0);
647 if (load_segment(env, &e1, &e2, selector) != 0) {
648 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
650 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
651 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
653 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
654 if (dpl > cpl) {
655 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
657 if (!(e2 & DESC_P_MASK)) {
658 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
660 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
661 /* to inner privilege */
662 get_ss_esp_from_tss(env, &ss, &esp, dpl);
663 if ((ss & 0xfffc) == 0) {
664 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
666 if ((ss & 3) != dpl) {
667 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
669 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
670 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
672 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
673 if (ss_dpl != dpl) {
674 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
676 if (!(ss_e2 & DESC_S_MASK) ||
677 (ss_e2 & DESC_CS_MASK) ||
678 !(ss_e2 & DESC_W_MASK)) {
679 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
681 if (!(ss_e2 & DESC_P_MASK)) {
682 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
684 new_stack = 1;
685 sp_mask = get_sp_mask(ss_e2);
686 ssp = get_seg_base(ss_e1, ss_e2);
687 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
688 /* to same privilege */
689 if (vm86) {
690 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692 new_stack = 0;
693 sp_mask = get_sp_mask(env->segs[R_SS].flags);
694 ssp = env->segs[R_SS].base;
695 esp = env->regs[R_ESP];
696 dpl = cpl;
697 } else {
698 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
699 new_stack = 0; /* avoid warning */
700 sp_mask = 0; /* avoid warning */
701 ssp = 0; /* avoid warning */
702 esp = 0; /* avoid warning */
705 shift = type >> 3;
707 #if 0
708 /* XXX: check that enough room is available */
709 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
710 if (vm86) {
711 push_size += 8;
713 push_size <<= shift;
714 #endif
715 if (shift == 1) {
716 if (new_stack) {
717 if (vm86) {
718 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
719 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
720 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
721 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
723 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
724 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
726 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
727 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
728 PUSHL(ssp, esp, sp_mask, old_eip);
729 if (has_error_code) {
730 PUSHL(ssp, esp, sp_mask, error_code);
732 } else {
733 if (new_stack) {
734 if (vm86) {
735 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
736 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
737 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
738 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
740 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
741 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
743 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
744 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
745 PUSHW(ssp, esp, sp_mask, old_eip);
746 if (has_error_code) {
747 PUSHW(ssp, esp, sp_mask, error_code);
751 /* interrupt gates clear the IF flag (trap gates do not) */
752 if ((type & 1) == 0) {
753 env->eflags &= ~IF_MASK;
755 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
757 if (new_stack) {
758 if (vm86) {
759 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
760 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
761 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
762 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
764 ss = (ss & ~3) | dpl;
765 cpu_x86_load_seg_cache(env, R_SS, ss,
766 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
768 SET_ESP(esp, sp_mask);
770 selector = (selector & ~3) | dpl;
771 cpu_x86_load_seg_cache(env, R_CS, selector,
772 get_seg_base(e1, e2),
773 get_seg_limit(e1, e2),
774 e2);
775 env->eip = offset;
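/* 64-bit (long mode) interrupt delivery follows.  IDT entries are 16
 * bytes, and the 64-bit TSS keeps RSP0-RSP2 at offsets 4/12/20 and
 * IST1-IST7 at offsets 36..84, which is why get_rsp_from_tss() computes
 * 8 * level + 4 and IST n is requested as level n + 3.  Unlike 32-bit
 * delivery, SS:RSP is always pushed and the new stack is aligned down to
 * 16 bytes. */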
778 #ifdef TARGET_X86_64
780 #define PUSHQ(sp, val) \
782 sp -= 8; \
783 cpu_stq_kernel(env, sp, (val)); \
786 #define POPQ(sp, val) \
788 val = cpu_ldq_kernel(env, sp); \
789 sp += 8; \
792 static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
794 X86CPU *cpu = x86_env_get_cpu(env);
795 int index;
797 #if 0
798 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
799 env->tr.base, env->tr.limit);
800 #endif
802 if (!(env->tr.flags & DESC_P_MASK)) {
803 cpu_abort(CPU(cpu), "invalid tss");
805 index = 8 * level + 4;
806 if ((index + 7) > env->tr.limit) {
807 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
809 return cpu_ldq_kernel(env, env->tr.base + index);
812 /* 64 bit interrupt */
813 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
814 int error_code, target_ulong next_eip, int is_hw)
816 SegmentCache *dt;
817 target_ulong ptr;
818 int type, dpl, selector, cpl, ist;
819 int has_error_code, new_stack;
820 uint32_t e1, e2, e3, ss;
821 target_ulong old_eip, esp, offset;
823 has_error_code = 0;
824 if (!is_int && !is_hw) {
825 has_error_code = exception_has_error_code(intno);
827 if (is_int) {
828 old_eip = next_eip;
829 } else {
830 old_eip = env->eip;
833 dt = &env->idt;
834 if (intno * 16 + 15 > dt->limit) {
835 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
837 ptr = dt->base + intno * 16;
838 e1 = cpu_ldl_kernel(env, ptr);
839 e2 = cpu_ldl_kernel(env, ptr + 4);
840 e3 = cpu_ldl_kernel(env, ptr + 8);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch (type) {
844 case 14: /* 386 interrupt gate */
845 case 15: /* 386 trap gate */
846 break;
847 default:
848 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
849 break;
851 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
852 cpl = env->hflags & HF_CPL_MASK;
853 /* check privilege if software int */
854 if (is_int && dpl < cpl) {
855 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
857 /* check valid bit */
858 if (!(e2 & DESC_P_MASK)) {
859 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
861 selector = e1 >> 16;
862 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
863 ist = e2 & 7;
864 if ((selector & 0xfffc) == 0) {
865 raise_exception_err(env, EXCP0D_GPF, 0);
868 if (load_segment(env, &e1, &e2, selector) != 0) {
869 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
871 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
872 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
874 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
875 if (dpl > cpl) {
876 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
878 if (!(e2 & DESC_P_MASK)) {
879 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
881 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
882 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
884 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
885 /* to inner privilege */
886 if (ist != 0) {
887 esp = get_rsp_from_tss(env, ist + 3);
888 } else {
889 esp = get_rsp_from_tss(env, dpl);
891 esp &= ~0xfLL; /* align stack */
892 ss = 0;
893 new_stack = 1;
894 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
895 /* to same privilege */
896 if (env->eflags & VM_MASK) {
897 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
899 new_stack = 0;
900 if (ist != 0) {
901 esp = get_rsp_from_tss(env, ist + 3);
902 } else {
903 esp = env->regs[R_ESP];
905 esp &= ~0xfLL; /* align stack */
906 dpl = cpl;
907 } else {
908 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
909 new_stack = 0; /* avoid warning */
910 esp = 0; /* avoid warning */
913 PUSHQ(esp, env->segs[R_SS].selector);
914 PUSHQ(esp, env->regs[R_ESP]);
915 PUSHQ(esp, cpu_compute_eflags(env));
916 PUSHQ(esp, env->segs[R_CS].selector);
917 PUSHQ(esp, old_eip);
918 if (has_error_code) {
919 PUSHQ(esp, error_code);
922 /* interrupt gates clear the IF flag (trap gates do not) */
923 if ((type & 1) == 0) {
924 env->eflags &= ~IF_MASK;
926 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
928 if (new_stack) {
929 ss = 0 | dpl;
930 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
932 env->regs[R_ESP] = esp;
934 selector = (selector & ~3) | dpl;
935 cpu_x86_load_seg_cache(env, R_CS, selector,
936 get_seg_base(e1, e2),
937 get_seg_limit(e1, e2),
938 e2);
939 env->eip = offset;
941 #endif
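/* SYSCALL/SYSRET fast system calls.  SYSCALL loads CS from STAR[47:32]
 * and SS from STAR[47:32] + 8; in long mode it saves the return RIP in
 * RCX and RFLAGS in R11, masks RFLAGS with SFMASK and jumps to LSTAR (or
 * CSTAR for a 32-bit caller), while legacy-mode SYSCALL jumps to
 * STAR[31:0].  SYSRET reloads CS from STAR[63:48] (+16 when returning to
 * 64-bit mode), SS from STAR[63:48] + 8, and returns to RCX with RFLAGS
 * taken from R11 in long mode. */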
943 #ifdef TARGET_X86_64
944 #if defined(CONFIG_USER_ONLY)
945 void helper_syscall(CPUX86State *env, int next_eip_addend)
947 CPUState *cs = CPU(x86_env_get_cpu(env));
949 cs->exception_index = EXCP_SYSCALL;
950 env->exception_next_eip = env->eip + next_eip_addend;
951 cpu_loop_exit(cs);
953 #else
954 void helper_syscall(CPUX86State *env, int next_eip_addend)
956 int selector;
958 if (!(env->efer & MSR_EFER_SCE)) {
959 raise_exception_err(env, EXCP06_ILLOP, 0);
961 selector = (env->star >> 32) & 0xffff;
962 if (env->hflags & HF_LMA_MASK) {
963 int code64;
965 env->regs[R_ECX] = env->eip + next_eip_addend;
966 env->regs[11] = cpu_compute_eflags(env);
968 code64 = env->hflags & HF_CS64_MASK;
970 env->eflags &= ~env->fmask;
971 cpu_load_eflags(env, env->eflags, 0);
972 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
973 0, 0xffffffff,
974 DESC_G_MASK | DESC_P_MASK |
975 DESC_S_MASK |
976 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
977 DESC_L_MASK);
978 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
979 0, 0xffffffff,
980 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
981 DESC_S_MASK |
982 DESC_W_MASK | DESC_A_MASK);
983 if (code64) {
984 env->eip = env->lstar;
985 } else {
986 env->eip = env->cstar;
988 } else {
989 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
991 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
992 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
993 0, 0xffffffff,
994 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
995 DESC_S_MASK |
996 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
997 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
998 0, 0xffffffff,
999 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1000 DESC_S_MASK |
1001 DESC_W_MASK | DESC_A_MASK);
1002 env->eip = (uint32_t)env->star;
1005 #endif
1006 #endif
1008 #ifdef TARGET_X86_64
1009 void helper_sysret(CPUX86State *env, int dflag)
1011 int cpl, selector;
1013 if (!(env->efer & MSR_EFER_SCE)) {
1014 raise_exception_err(env, EXCP06_ILLOP, 0);
1016 cpl = env->hflags & HF_CPL_MASK;
1017 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1018 raise_exception_err(env, EXCP0D_GPF, 0);
1020 selector = (env->star >> 48) & 0xffff;
1021 if (env->hflags & HF_LMA_MASK) {
1022 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1023 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1024 NT_MASK);
1025 if (dflag == 2) {
1026 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1027 0, 0xffffffff,
1028 DESC_G_MASK | DESC_P_MASK |
1029 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1030 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1031 DESC_L_MASK);
1032 env->eip = env->regs[R_ECX];
1033 } else {
1034 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1035 0, 0xffffffff,
1036 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1037 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1038 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1039 env->eip = (uint32_t)env->regs[R_ECX];
1041 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1042 0, 0xffffffff,
1043 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1044 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1045 DESC_W_MASK | DESC_A_MASK);
1046 } else {
1047 env->eflags |= IF_MASK;
1048 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1049 0, 0xffffffff,
1050 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1051 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1052 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1053 env->eip = (uint32_t)env->regs[R_ECX];
1054 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1055 0, 0xffffffff,
1056 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1057 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1058 DESC_W_MASK | DESC_A_MASK);
1061 #endif
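/* Real mode delivery is much simpler: the IVT at idt.base holds one
 * 4-byte entry per vector (16-bit offset, then the segment), the handler
 * frame is three 16-bit pushes (FLAGS, CS, IP) and IF/TF/AC/RF are
 * cleared. */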
1063 /* real mode interrupt */
1064 static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1065 int error_code, unsigned int next_eip)
1067 SegmentCache *dt;
1068 target_ulong ptr, ssp;
1069 int selector;
1070 uint32_t offset, esp;
1071 uint32_t old_cs, old_eip;
1073 /* real mode (simpler!) */
1074 dt = &env->idt;
1075 if (intno * 4 + 3 > dt->limit) {
1076 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1078 ptr = dt->base + intno * 4;
1079 offset = cpu_lduw_kernel(env, ptr);
1080 selector = cpu_lduw_kernel(env, ptr + 2);
1081 esp = env->regs[R_ESP];
1082 ssp = env->segs[R_SS].base;
1083 if (is_int) {
1084 old_eip = next_eip;
1085 } else {
1086 old_eip = env->eip;
1088 old_cs = env->segs[R_CS].selector;
1089 /* XXX: use SS segment size? */
1090 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1091 PUSHW(ssp, esp, 0xffff, old_cs);
1092 PUSHW(ssp, esp, 0xffff, old_eip);
1094 /* update processor state */
1095 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
1096 env->eip = offset;
1097 env->segs[R_CS].selector = selector;
1098 env->segs[R_CS].base = (selector << 4);
1099 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1102 #if defined(CONFIG_USER_ONLY)
1103 /* fake user mode interrupt */
1104 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1105 int error_code, target_ulong next_eip)
1107 SegmentCache *dt;
1108 target_ulong ptr;
1109 int dpl, cpl, shift;
1110 uint32_t e2;
1112 dt = &env->idt;
1113 if (env->hflags & HF_LMA_MASK) {
1114 shift = 4;
1115 } else {
1116 shift = 3;
1118 ptr = dt->base + (intno << shift);
1119 e2 = cpu_ldl_kernel(env, ptr + 4);
1121 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1122 cpl = env->hflags & HF_CPL_MASK;
1123 /* check privilege if software int */
1124 if (is_int && dpl < cpl) {
1125 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1128 /* Since we emulate only user space, we cannot do more than
1129 exiting the emulation with the suitable exception and error
1130 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1131 if (is_int || intno == EXCP_SYSCALL) {
1132 env->eip = next_eip;
1136 #else
1138 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1139 int error_code, int is_hw, int rm)
1141 CPUState *cs = CPU(x86_env_get_cpu(env));
1142 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1143 control.event_inj));
1145 if (!(event_inj & SVM_EVTINJ_VALID)) {
1146 int type;
1148 if (is_int) {
1149 type = SVM_EVTINJ_TYPE_SOFT;
1150 } else {
1151 type = SVM_EVTINJ_TYPE_EXEPT;
1153 event_inj = intno | type | SVM_EVTINJ_VALID;
1154 if (!rm && exception_has_error_code(intno)) {
1155 event_inj |= SVM_EVTINJ_VALID_ERR;
1156 stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1157 control.event_inj_err),
1158 error_code);
1160 stl_phys(cs->as,
1161 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1162 event_inj);
1165 #endif
1168 * Begin execution of an interruption. is_int is TRUE if coming from
1169 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1170 * instruction. It is only relevant if is_int is TRUE.
1172 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1173 int error_code, target_ulong next_eip, int is_hw)
1175 CPUX86State *env = &cpu->env;
1177 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1178 if ((env->cr[0] & CR0_PE_MASK)) {
1179 static int count;
1181 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1182 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1183 count, intno, error_code, is_int,
1184 env->hflags & HF_CPL_MASK,
1185 env->segs[R_CS].selector, env->eip,
1186 (int)env->segs[R_CS].base + env->eip,
1187 env->segs[R_SS].selector, env->regs[R_ESP]);
1188 if (intno == 0x0e) {
1189 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1190 } else {
1191 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1193 qemu_log("\n");
1194 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1195 #if 0
1197 int i;
1198 target_ulong ptr;
1200 qemu_log(" code=");
1201 ptr = env->segs[R_CS].base + env->eip;
1202 for (i = 0; i < 16; i++) {
1203 qemu_log(" %02x", ldub(ptr + i));
1205 qemu_log("\n");
1207 #endif
1208 count++;
1211 if (env->cr[0] & CR0_PE_MASK) {
1212 #if !defined(CONFIG_USER_ONLY)
1213 if (env->hflags & HF_SVMI_MASK) {
1214 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1216 #endif
1217 #ifdef TARGET_X86_64
1218 if (env->hflags & HF_LMA_MASK) {
1219 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1220 } else
1221 #endif
1223 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1224 is_hw);
1226 } else {
1227 #if !defined(CONFIG_USER_ONLY)
1228 if (env->hflags & HF_SVMI_MASK) {
1229 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1231 #endif
1232 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1235 #if !defined(CONFIG_USER_ONLY)
1236 if (env->hflags & HF_SVMI_MASK) {
1237 CPUState *cs = CPU(cpu);
1238 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
1239 offsetof(struct vmcb,
1240 control.event_inj));
1242 stl_phys(cs->as,
1243 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1244 event_inj & ~SVM_EVTINJ_VALID);
1246 #endif
1249 void x86_cpu_do_interrupt(CPUState *cs)
1251 X86CPU *cpu = X86_CPU(cs);
1252 CPUX86State *env = &cpu->env;
1254 #if defined(CONFIG_USER_ONLY)
1255 /* if user mode only, we simulate a fake exception
1256 which will be handled outside the cpu execution
1257 loop */
1258 do_interrupt_user(env, cs->exception_index,
1259 env->exception_is_int,
1260 env->error_code,
1261 env->exception_next_eip);
1262 /* successfully delivered */
1263 env->old_exception = -1;
1264 #else
1265 /* simulate a real cpu exception. On i386, it can
1266 trigger new exceptions, but we do not handle
1267 double or triple faults yet. */
1268 do_interrupt_all(cpu, cs->exception_index,
1269 env->exception_is_int,
1270 env->error_code,
1271 env->exception_next_eip, 0);
1272 /* successfully delivered */
1273 env->old_exception = -1;
1274 #endif
1277 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1279 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
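/* ENTER with a non-zero nesting level: helper_enter_level() (and the
 * 64-bit variant below) copies the enclosing frame pointers from the old
 * frame into the new one and finally stores the new frame pointer value
 * t1. */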
1282 void helper_enter_level(CPUX86State *env, int level, int data32,
1283 target_ulong t1)
1285 target_ulong ssp;
1286 uint32_t esp_mask, esp, ebp;
1288 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1289 ssp = env->segs[R_SS].base;
1290 ebp = env->regs[R_EBP];
1291 esp = env->regs[R_ESP];
1292 if (data32) {
1293 /* 32 bit */
1294 esp -= 4;
1295 while (--level) {
1296 esp -= 4;
1297 ebp -= 4;
1298 cpu_stl_data(env, ssp + (esp & esp_mask),
1299 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
1301 esp -= 4;
1302 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
1303 } else {
1304 /* 16 bit */
1305 esp -= 2;
1306 while (--level) {
1307 esp -= 2;
1308 ebp -= 2;
1309 cpu_stw_data(env, ssp + (esp & esp_mask),
1310 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
1312 esp -= 2;
1313 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
1317 #ifdef TARGET_X86_64
1318 void helper_enter64_level(CPUX86State *env, int level, int data64,
1319 target_ulong t1)
1321 target_ulong esp, ebp;
1323 ebp = env->regs[R_EBP];
1324 esp = env->regs[R_ESP];
1326 if (data64) {
1327 /* 64 bit */
1328 esp -= 8;
1329 while (--level) {
1330 esp -= 8;
1331 ebp -= 8;
1332 cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
1334 esp -= 8;
1335 cpu_stq_data(env, esp, t1);
1336 } else {
1337 /* 16 bit */
1338 esp -= 2;
1339 while (--level) {
1340 esp -= 2;
1341 ebp -= 2;
1342 cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
1344 esp -= 2;
1345 cpu_stw_data(env, esp, t1);
1348 #endif
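/* LLDT and LTR load the LDTR and task register from a GDT descriptor.
 * In long mode system descriptors are 16 bytes, so the entry limit is 15
 * and the upper 32 bits of the base come from the third descriptor word;
 * LTR additionally marks the descriptor busy. */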
1350 void helper_lldt(CPUX86State *env, int selector)
1352 SegmentCache *dt;
1353 uint32_t e1, e2;
1354 int index, entry_limit;
1355 target_ulong ptr;
1357 selector &= 0xffff;
1358 if ((selector & 0xfffc) == 0) {
1359 /* XXX: NULL selector case: invalid LDT */
1360 env->ldt.base = 0;
1361 env->ldt.limit = 0;
1362 } else {
1363 if (selector & 0x4) {
1364 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1366 dt = &env->gdt;
1367 index = selector & ~7;
1368 #ifdef TARGET_X86_64
1369 if (env->hflags & HF_LMA_MASK) {
1370 entry_limit = 15;
1371 } else
1372 #endif
1374 entry_limit = 7;
1376 if ((index + entry_limit) > dt->limit) {
1377 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1379 ptr = dt->base + index;
1380 e1 = cpu_ldl_kernel(env, ptr);
1381 e2 = cpu_ldl_kernel(env, ptr + 4);
1382 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1383 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1385 if (!(e2 & DESC_P_MASK)) {
1386 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1388 #ifdef TARGET_X86_64
1389 if (env->hflags & HF_LMA_MASK) {
1390 uint32_t e3;
1392 e3 = cpu_ldl_kernel(env, ptr + 8);
1393 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1394 env->ldt.base |= (target_ulong)e3 << 32;
1395 } else
1396 #endif
1398 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1401 env->ldt.selector = selector;
1404 void helper_ltr(CPUX86State *env, int selector)
1406 SegmentCache *dt;
1407 uint32_t e1, e2;
1408 int index, type, entry_limit;
1409 target_ulong ptr;
1411 selector &= 0xffff;
1412 if ((selector & 0xfffc) == 0) {
1413 /* NULL selector case: invalid TR */
1414 env->tr.base = 0;
1415 env->tr.limit = 0;
1416 env->tr.flags = 0;
1417 } else {
1418 if (selector & 0x4) {
1419 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1421 dt = &env->gdt;
1422 index = selector & ~7;
1423 #ifdef TARGET_X86_64
1424 if (env->hflags & HF_LMA_MASK) {
1425 entry_limit = 15;
1426 } else
1427 #endif
1429 entry_limit = 7;
1431 if ((index + entry_limit) > dt->limit) {
1432 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1434 ptr = dt->base + index;
1435 e1 = cpu_ldl_kernel(env, ptr);
1436 e2 = cpu_ldl_kernel(env, ptr + 4);
1437 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1438 if ((e2 & DESC_S_MASK) ||
1439 (type != 1 && type != 9)) {
1440 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1442 if (!(e2 & DESC_P_MASK)) {
1443 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1445 #ifdef TARGET_X86_64
1446 if (env->hflags & HF_LMA_MASK) {
1447 uint32_t e3, e4;
1449 e3 = cpu_ldl_kernel(env, ptr + 8);
1450 e4 = cpu_ldl_kernel(env, ptr + 12);
1451 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1452 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1454 load_seg_cache_raw_dt(&env->tr, e1, e2);
1455 env->tr.base |= (target_ulong)e3 << 32;
1456 } else
1457 #endif
1459 load_seg_cache_raw_dt(&env->tr, e1, e2);
1461 e2 |= DESC_TSS_BUSY_MASK;
1462 cpu_stl_kernel(env, ptr + 4, e2);
1464 env->tr.selector = selector;
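/* helper_load_seg() implements MOV/POP to a data segment register or SS
 * in protected mode.  A null selector just clears the descriptor cache
 * for data segments; for SS it is only allowed in 64-bit code running
 * below CPL 3.  Loading a segment also sets the accessed bit in the
 * descriptor. */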
1467 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1468 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1470 uint32_t e1, e2;
1471 int cpl, dpl, rpl;
1472 SegmentCache *dt;
1473 int index;
1474 target_ulong ptr;
1476 selector &= 0xffff;
1477 cpl = env->hflags & HF_CPL_MASK;
1478 if ((selector & 0xfffc) == 0) {
1479 /* null selector case */
1480 if (seg_reg == R_SS
1481 #ifdef TARGET_X86_64
1482 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1483 #endif
1485 raise_exception_err(env, EXCP0D_GPF, 0);
1487 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1488 } else {
1490 if (selector & 0x4) {
1491 dt = &env->ldt;
1492 } else {
1493 dt = &env->gdt;
1495 index = selector & ~7;
1496 if ((index + 7) > dt->limit) {
1497 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1499 ptr = dt->base + index;
1500 e1 = cpu_ldl_kernel(env, ptr);
1501 e2 = cpu_ldl_kernel(env, ptr + 4);
1503 if (!(e2 & DESC_S_MASK)) {
1504 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1506 rpl = selector & 3;
1507 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1508 if (seg_reg == R_SS) {
1509 /* must be writable segment */
1510 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1511 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1513 if (rpl != cpl || dpl != cpl) {
1514 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1516 } else {
1517 /* must be readable segment */
1518 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1519 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1522 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1523 /* if not conforming code, test rights */
1524 if (dpl < cpl || dpl < rpl) {
1525 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1530 if (!(e2 & DESC_P_MASK)) {
1531 if (seg_reg == R_SS) {
1532 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1533 } else {
1534 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1538 /* set the access bit if not already set */
1539 if (!(e2 & DESC_A_MASK)) {
1540 e2 |= DESC_A_MASK;
1541 cpu_stl_kernel(env, ptr + 4, e2);
1544 cpu_x86_load_seg_cache(env, seg_reg, selector,
1545 get_seg_base(e1, e2),
1546 get_seg_limit(e1, e2),
1547 e2);
1548 #if 0
1549 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1550 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1551 #endif
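/* Far JMP in protected mode: a direct jump to a code segment requires
 * DPL <= CPL for conforming code, or RPL <= CPL and DPL == CPL for
 * non-conforming code, and never changes CPL.  Jumps through task gates
 * and TSS descriptors go to switch_tss(); jumps through call gates take
 * the target CS:EIP from the gate and apply the same code segment
 * checks. */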
1555 /* protected mode jump */
1556 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1557 int next_eip_addend)
1559 int gate_cs, type;
1560 uint32_t e1, e2, cpl, dpl, rpl, limit;
1561 target_ulong next_eip;
1563 if ((new_cs & 0xfffc) == 0) {
1564 raise_exception_err(env, EXCP0D_GPF, 0);
1566 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1567 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1569 cpl = env->hflags & HF_CPL_MASK;
1570 if (e2 & DESC_S_MASK) {
1571 if (!(e2 & DESC_CS_MASK)) {
1572 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1574 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1575 if (e2 & DESC_C_MASK) {
1576 /* conforming code segment */
1577 if (dpl > cpl) {
1578 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1580 } else {
1581 /* non conforming code segment */
1582 rpl = new_cs & 3;
1583 if (rpl > cpl) {
1584 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1586 if (dpl != cpl) {
1587 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1590 if (!(e2 & DESC_P_MASK)) {
1591 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1593 limit = get_seg_limit(e1, e2);
1594 if (new_eip > limit &&
1595 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1596 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1598 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1599 get_seg_base(e1, e2), limit, e2);
1600 env->eip = new_eip;
1601 } else {
1602 /* jump to call or task gate */
1603 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1604 rpl = new_cs & 3;
1605 cpl = env->hflags & HF_CPL_MASK;
1606 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1607 switch (type) {
1608 case 1: /* 286 TSS */
1609 case 9: /* 386 TSS */
1610 case 5: /* task gate */
1611 if (dpl < cpl || dpl < rpl) {
1612 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1614 next_eip = env->eip + next_eip_addend;
1615 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1616 break;
1617 case 4: /* 286 call gate */
1618 case 12: /* 386 call gate */
1619 if ((dpl < cpl) || (dpl < rpl)) {
1620 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1622 if (!(e2 & DESC_P_MASK)) {
1623 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1625 gate_cs = e1 >> 16;
1626 new_eip = (e1 & 0xffff);
1627 if (type == 12) {
1628 new_eip |= (e2 & 0xffff0000);
1630 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
1631 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1633 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1634 /* must be code segment */
1635 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1636 (DESC_S_MASK | DESC_CS_MASK))) {
1637 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1639 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1640 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1641 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1643 if (!(e2 & DESC_P_MASK)) {
1644 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1646 limit = get_seg_limit(e1, e2);
1647 if (new_eip > limit) {
1648 raise_exception_err(env, EXCP0D_GPF, 0);
1650 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1651 get_seg_base(e1, e2), limit, e2);
1652 env->eip = new_eip;
1653 break;
1654 default:
1655 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1656 break;
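/* Far CALL: the real-mode version just pushes CS:IP and loads the new
 * CS:IP.  The protected-mode version below additionally handles call
 * gates; a call gate to a more privileged segment switches to the stack
 * given by the TSS for the target DPL and copies param_count parameter
 * words from the caller's stack before pushing the return address. */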
1661 /* real mode call */
1662 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1663 int shift, int next_eip)
1665 int new_eip;
1666 uint32_t esp, esp_mask;
1667 target_ulong ssp;
1669 new_eip = new_eip1;
1670 esp = env->regs[R_ESP];
1671 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1672 ssp = env->segs[R_SS].base;
1673 if (shift) {
1674 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1675 PUSHL(ssp, esp, esp_mask, next_eip);
1676 } else {
1677 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1678 PUSHW(ssp, esp, esp_mask, next_eip);
1681 SET_ESP(esp, esp_mask);
1682 env->eip = new_eip;
1683 env->segs[R_CS].selector = new_cs;
1684 env->segs[R_CS].base = (new_cs << 4);
1687 /* protected mode call */
1688 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1689 int shift, int next_eip_addend)
1691 int new_stack, i;
1692 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1693 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1694 uint32_t val, limit, old_sp_mask;
1695 target_ulong ssp, old_ssp, next_eip;
1697 next_eip = env->eip + next_eip_addend;
1698 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1699 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1700 if ((new_cs & 0xfffc) == 0) {
1701 raise_exception_err(env, EXCP0D_GPF, 0);
1703 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1704 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1706 cpl = env->hflags & HF_CPL_MASK;
1707 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1708 if (e2 & DESC_S_MASK) {
1709 if (!(e2 & DESC_CS_MASK)) {
1710 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1712 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1713 if (e2 & DESC_C_MASK) {
1714 /* conforming code segment */
1715 if (dpl > cpl) {
1716 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1718 } else {
1719 /* non conforming code segment */
1720 rpl = new_cs & 3;
1721 if (rpl > cpl) {
1722 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1724 if (dpl != cpl) {
1725 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1728 if (!(e2 & DESC_P_MASK)) {
1729 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1732 #ifdef TARGET_X86_64
1733 /* XXX: check 16/32 bit cases in long mode */
1734 if (shift == 2) {
1735 target_ulong rsp;
1737 /* 64 bit case */
1738 rsp = env->regs[R_ESP];
1739 PUSHQ(rsp, env->segs[R_CS].selector);
1740 PUSHQ(rsp, next_eip);
1741 /* from this point, not restartable */
1742 env->regs[R_ESP] = rsp;
1743 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1744 get_seg_base(e1, e2),
1745 get_seg_limit(e1, e2), e2);
1746 env->eip = new_eip;
1747 } else
1748 #endif
1750 sp = env->regs[R_ESP];
1751 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1752 ssp = env->segs[R_SS].base;
1753 if (shift) {
1754 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1755 PUSHL(ssp, sp, sp_mask, next_eip);
1756 } else {
1757 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1758 PUSHW(ssp, sp, sp_mask, next_eip);
1761 limit = get_seg_limit(e1, e2);
1762 if (new_eip > limit) {
1763 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1765 /* from this point, not restartable */
1766 SET_ESP(sp, sp_mask);
1767 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1768 get_seg_base(e1, e2), limit, e2);
1769 env->eip = new_eip;
1771 } else {
1772 /* check gate type */
1773 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1774 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1775 rpl = new_cs & 3;
1776 switch (type) {
1777 case 1: /* available 286 TSS */
1778 case 9: /* available 386 TSS */
1779 case 5: /* task gate */
1780 if (dpl < cpl || dpl < rpl) {
1781 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1783 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1784 return;
1785 case 4: /* 286 call gate */
1786 case 12: /* 386 call gate */
1787 break;
1788 default:
1789 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1790 break;
1792 shift = type >> 3;
1794 if (dpl < cpl || dpl < rpl) {
1795 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1797 /* check valid bit */
1798 if (!(e2 & DESC_P_MASK)) {
1799 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1801 selector = e1 >> 16;
1802 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1803 param_count = e2 & 0x1f;
1804 if ((selector & 0xfffc) == 0) {
1805 raise_exception_err(env, EXCP0D_GPF, 0);
1808 if (load_segment(env, &e1, &e2, selector) != 0) {
1809 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1811 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1812 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1814 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1815 if (dpl > cpl) {
1816 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1818 if (!(e2 & DESC_P_MASK)) {
1819 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1822 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1823 /* to inner privilege */
1824 get_ss_esp_from_tss(env, &ss, &sp, dpl);
1825 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1826 TARGET_FMT_lx "\n", ss, sp, param_count,
1827 env->regs[R_ESP]);
1828 if ((ss & 0xfffc) == 0) {
1829 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1831 if ((ss & 3) != dpl) {
1832 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1834 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
1835 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1837 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1838 if (ss_dpl != dpl) {
1839 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1841 if (!(ss_e2 & DESC_S_MASK) ||
1842 (ss_e2 & DESC_CS_MASK) ||
1843 !(ss_e2 & DESC_W_MASK)) {
1844 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1846 if (!(ss_e2 & DESC_P_MASK)) {
1847 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1850 /* push_size = ((param_count * 2) + 8) << shift; */
1852 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1853 old_ssp = env->segs[R_SS].base;
1855 sp_mask = get_sp_mask(ss_e2);
1856 ssp = get_seg_base(ss_e1, ss_e2);
1857 if (shift) {
1858 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1859 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
1860 for (i = param_count - 1; i >= 0; i--) {
1861 val = cpu_ldl_kernel(env, old_ssp +
1862 ((env->regs[R_ESP] + i * 4) &
1863 old_sp_mask));
1864 PUSHL(ssp, sp, sp_mask, val);
1866 } else {
1867 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1868 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
1869 for (i = param_count - 1; i >= 0; i--) {
1870 val = cpu_lduw_kernel(env, old_ssp +
1871 ((env->regs[R_ESP] + i * 2) &
1872 old_sp_mask));
1873 PUSHW(ssp, sp, sp_mask, val);
1876 new_stack = 1;
1877 } else {
1878 /* to same privilege */
1879 sp = env->regs[R_ESP];
1880 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1881 ssp = env->segs[R_SS].base;
1882 /* push_size = (4 << shift); */
1883 new_stack = 0;
1886 if (shift) {
1887 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1888 PUSHL(ssp, sp, sp_mask, next_eip);
1889 } else {
1890 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1891 PUSHW(ssp, sp, sp_mask, next_eip);
1894 /* from this point, not restartable */
1896 if (new_stack) {
1897 ss = (ss & ~3) | dpl;
1898 cpu_x86_load_seg_cache(env, R_SS, ss,
1899 ssp,
1900 get_seg_limit(ss_e1, ss_e2),
1901 ss_e2);
1904 selector = (selector & ~3) | dpl;
1905 cpu_x86_load_seg_cache(env, R_CS, selector,
1906 get_seg_base(e1, e2),
1907 get_seg_limit(e1, e2),
1908 e2);
1909 SET_ESP(sp, sp_mask);
1910 env->eip = offset;
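/* Far returns and IRET.  helper_iret_real() pops IP, CS and FLAGS with a
 * 16-bit stack mask.  helper_ret_protected() below handles both far RET
 * and IRET in protected mode: a return to the same privilege level only
 * reloads CS:EIP, while a return to an outer level also pops ESP and SS,
 * revalidates the data segment registers and, for IRET, may return to
 * vm86 mode. */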
1914 /* real and vm86 mode iret */
1915 void helper_iret_real(CPUX86State *env, int shift)
1917 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1918 target_ulong ssp;
1919 int eflags_mask;
1921 sp_mask = 0xffff; /* XXXX: use SS segment size? */
1922 sp = env->regs[R_ESP];
1923 ssp = env->segs[R_SS].base;
1924 if (shift == 1) {
1925 /* 32 bits */
1926 POPL(ssp, sp, sp_mask, new_eip);
1927 POPL(ssp, sp, sp_mask, new_cs);
1928 new_cs &= 0xffff;
1929 POPL(ssp, sp, sp_mask, new_eflags);
1930 } else {
1931 /* 16 bits */
1932 POPW(ssp, sp, sp_mask, new_eip);
1933 POPW(ssp, sp, sp_mask, new_cs);
1934 POPW(ssp, sp, sp_mask, new_eflags);
1936 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
1937 env->segs[R_CS].selector = new_cs;
1938 env->segs[R_CS].base = (new_cs << 4);
1939 env->eip = new_eip;
1940 if (env->eflags & VM_MASK) {
1941 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1942 NT_MASK;
1943 } else {
1944 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1945 RF_MASK | NT_MASK;
1947 if (shift == 0) {
1948 eflags_mask &= 0xffff;
1950 cpu_load_eflags(env, new_eflags, eflags_mask);
1951 env->hflags2 &= ~HF2_NMI_MASK;
1954 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
1956 int dpl;
1957 uint32_t e2;
1959 /* XXX: on x86_64, we do not want to nullify FS and GS because
1960 they may still contain a valid base. I would be interested to
1961 know how a real x86_64 CPU behaves */
1962 if ((seg_reg == R_FS || seg_reg == R_GS) &&
1963 (env->segs[seg_reg].selector & 0xfffc) == 0) {
1964 return;
1967 e2 = env->segs[seg_reg].flags;
1968 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1969 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1970 /* data or non conforming code segment */
1971 if (dpl < cpl) {
1972 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
1977 /* protected mode iret */
1978 static inline void helper_ret_protected(CPUX86State *env, int shift,
1979 int is_iret, int addend)
1981 uint32_t new_cs, new_eflags, new_ss;
1982 uint32_t new_es, new_ds, new_fs, new_gs;
1983 uint32_t e1, e2, ss_e1, ss_e2;
1984 int cpl, dpl, rpl, eflags_mask, iopl;
1985 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1987 #ifdef TARGET_X86_64
1988 if (shift == 2) {
1989 sp_mask = -1;
1990 } else
1991 #endif
1993 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1995 sp = env->regs[R_ESP];
1996 ssp = env->segs[R_SS].base;
1997 new_eflags = 0; /* avoid warning */
1998 #ifdef TARGET_X86_64
1999 if (shift == 2) {
2000 POPQ(sp, new_eip);
2001 POPQ(sp, new_cs);
2002 new_cs &= 0xffff;
2003 if (is_iret) {
2004 POPQ(sp, new_eflags);
2005 }
2006 } else
2007 #endif
2008 {
2009 if (shift == 1) {
2010 /* 32 bits */
2011 POPL(ssp, sp, sp_mask, new_eip);
2012 POPL(ssp, sp, sp_mask, new_cs);
2013 new_cs &= 0xffff;
2014 if (is_iret) {
2015 POPL(ssp, sp, sp_mask, new_eflags);
2016 if (new_eflags & VM_MASK) {
2017 goto return_to_vm86;
2018 }
2019 }
2020 } else {
2021 /* 16 bits */
2022 POPW(ssp, sp, sp_mask, new_eip);
2023 POPW(ssp, sp, sp_mask, new_cs);
2024 if (is_iret) {
2025 POPW(ssp, sp, sp_mask, new_eflags);
2026 }
2027 }
2029 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2030 new_cs, new_eip, shift, addend);
2031 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2032 if ((new_cs & 0xfffc) == 0) {
2033 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2034 }
2035 if (load_segment(env, &e1, &e2, new_cs) != 0) {
2036 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2037 }
2038 if (!(e2 & DESC_S_MASK) ||
2039 !(e2 & DESC_CS_MASK)) {
2040 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2041 }
2042 cpl = env->hflags & HF_CPL_MASK;
2043 rpl = new_cs & 3;
2044 if (rpl < cpl) {
2045 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2046 }
2047 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2048 if (e2 & DESC_C_MASK) {
2049 if (dpl > rpl) {
2050 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2051 }
2052 } else {
2053 if (dpl != rpl) {
2054 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2055 }
2056 }
2057 if (!(e2 & DESC_P_MASK)) {
2058 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2059 }
2061 sp += addend;
2062 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2063 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2064 /* return to same privilege level */
2065 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2066 get_seg_base(e1, e2),
2067 get_seg_limit(e1, e2),
2068 e2);
2069 } else {
2070 /* return to different privilege level */
2071 #ifdef TARGET_X86_64
2072 if (shift == 2) {
2073 POPQ(sp, new_esp);
2074 POPQ(sp, new_ss);
2075 new_ss &= 0xffff;
2076 } else
2077 #endif
2078 {
2079 if (shift == 1) {
2080 /* 32 bits */
2081 POPL(ssp, sp, sp_mask, new_esp);
2082 POPL(ssp, sp, sp_mask, new_ss);
2083 new_ss &= 0xffff;
2084 } else {
2085 /* 16 bits */
2086 POPW(ssp, sp, sp_mask, new_esp);
2087 POPW(ssp, sp, sp_mask, new_ss);
2088 }
2089 }
2090 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2091 new_ss, new_esp);
2092 if ((new_ss & 0xfffc) == 0) {
2093 #ifdef TARGET_X86_64
2094 /* NULL ss is allowed in long mode if cpl != 3 */
2095 /* XXX: test CS64? */
2096 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2097 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2098 0, 0xffffffff,
2099 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2100 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2101 DESC_W_MASK | DESC_A_MASK);
2102 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2103 } else
2104 #endif
2105 {
2106 raise_exception_err(env, EXCP0D_GPF, 0);
2107 }
2108 } else {
2109 if ((new_ss & 3) != rpl) {
2110 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2111 }
2112 if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
2113 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2114 }
2115 if (!(ss_e2 & DESC_S_MASK) ||
2116 (ss_e2 & DESC_CS_MASK) ||
2117 !(ss_e2 & DESC_W_MASK)) {
2118 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2119 }
2120 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2121 if (dpl != rpl) {
2122 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2123 }
2124 if (!(ss_e2 & DESC_P_MASK)) {
2125 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2126 }
2127 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2128 get_seg_base(ss_e1, ss_e2),
2129 get_seg_limit(ss_e1, ss_e2),
2130 ss_e2);
2131 }
2133 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2134 get_seg_base(e1, e2),
2135 get_seg_limit(e1, e2),
2136 e2);
2137 sp = new_esp;
2138 #ifdef TARGET_X86_64
2139 if (env->hflags & HF_CS64_MASK) {
2140 sp_mask = -1;
2141 } else
2142 #endif
2143 {
2144 sp_mask = get_sp_mask(ss_e2);
2145 }
2147 /* validate data segments */
2148 validate_seg(env, R_ES, rpl);
2149 validate_seg(env, R_DS, rpl);
2150 validate_seg(env, R_FS, rpl);
2151 validate_seg(env, R_GS, rpl);
2153 sp += addend;
2154 }
2155 SET_ESP(sp, sp_mask);
2156 env->eip = new_eip;
2157 if (is_iret) {
2158 /* NOTE: 'cpl' is the _old_ CPL */
2159 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2160 if (cpl == 0) {
2161 eflags_mask |= IOPL_MASK;
2162 }
2163 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2164 if (cpl <= iopl) {
2165 eflags_mask |= IF_MASK;
2166 }
2167 if (shift == 0) {
2168 eflags_mask &= 0xffff;
2169 }
2170 cpu_load_eflags(env, new_eflags, eflags_mask);
2171 }
2172 return;
2174 return_to_vm86:
2175 POPL(ssp, sp, sp_mask, new_esp);
2176 POPL(ssp, sp, sp_mask, new_ss);
2177 POPL(ssp, sp, sp_mask, new_es);
2178 POPL(ssp, sp, sp_mask, new_ds);
2179 POPL(ssp, sp, sp_mask, new_fs);
2180 POPL(ssp, sp, sp_mask, new_gs);
2182 /* modify processor state */
2183 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2184 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2185 VIP_MASK);
2186 load_seg_vm(env, R_CS, new_cs & 0xffff);
2187 load_seg_vm(env, R_SS, new_ss & 0xffff);
2188 load_seg_vm(env, R_ES, new_es & 0xffff);
2189 load_seg_vm(env, R_DS, new_ds & 0xffff);
2190 load_seg_vm(env, R_FS, new_fs & 0xffff);
2191 load_seg_vm(env, R_GS, new_gs & 0xffff);
2193 env->eip = new_eip & 0xffff;
2194 env->regs[R_ESP] = new_esp;
2195 }
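/*
 * Rough summary of what helper_ret_protected() pops above (32-bit case):
 * lret pops EIP and CS and then skips 'addend' bytes of parameters; iret
 * additionally pops EFLAGS. When the return goes to an outer privilege
 * level, ESP and SS are popped as well so the outer stack is reloaded,
 * and the data segment registers are validated against the new CPL.
 */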
2197 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2198 {
2199 int tss_selector, type;
2200 uint32_t e1, e2;
2202 /* specific case for TSS */
2203 if (env->eflags & NT_MASK) {
2204 #ifdef TARGET_X86_64
2205 if (env->hflags & HF_LMA_MASK) {
2206 raise_exception_err(env, EXCP0D_GPF, 0);
2207 }
2208 #endif
2209 tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
2210 if (tss_selector & 4) {
2211 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2212 }
2213 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
2214 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2215 }
2216 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2217 /* NOTE: we check both segment and busy TSS */
2218 if (type != 3) {
2219 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2220 }
2221 switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2222 } else {
2223 helper_ret_protected(env, shift, 1, 0);
2224 }
2225 env->hflags2 &= ~HF2_NMI_MASK;
2226 }
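/*
 * When NT is set, the IRET handled above is a nested-task return: the
 * previous task's TSS selector is read from the back link field at offset 0
 * of the current TSS and switch_tss() goes back to it instead of popping an
 * exception frame from the stack.
 */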
2228 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2229 {
2230 helper_ret_protected(env, shift, 0, addend);
2231 }
2233 void helper_sysenter(CPUX86State *env)
2234 {
2235 if (env->sysenter_cs == 0) {
2236 raise_exception_err(env, EXCP0D_GPF, 0);
2237 }
2238 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2240 #ifdef TARGET_X86_64
2241 if (env->hflags & HF_LMA_MASK) {
2242 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2243 0, 0xffffffff,
2244 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2245 DESC_S_MASK |
2246 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2247 DESC_L_MASK);
2248 } else
2249 #endif
2250 {
2251 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2252 0, 0xffffffff,
2253 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2254 DESC_S_MASK |
2255 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2256 }
2257 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2258 0, 0xffffffff,
2259 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2260 DESC_S_MASK |
2261 DESC_W_MASK | DESC_A_MASK);
2262 env->regs[R_ESP] = env->sysenter_esp;
2263 env->eip = env->sysenter_eip;
2264 }
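/*
 * Illustration with a hypothetical IA32_SYSENTER_CS of 0x100: the code above
 * enters the kernel with CS = 0x100 (flat, CPL 0) and SS = 0x108, using
 * sysenter_eip/sysenter_esp as the new EIP and ESP.
 */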
2266 void helper_sysexit(CPUX86State *env, int dflag)
2267 {
2268 int cpl;
2270 cpl = env->hflags & HF_CPL_MASK;
2271 if (env->sysenter_cs == 0 || cpl != 0) {
2272 raise_exception_err(env, EXCP0D_GPF, 0);
2273 }
2274 #ifdef TARGET_X86_64
2275 if (dflag == 2) {
2276 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2277 3, 0, 0xffffffff,
2278 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2279 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2280 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2281 DESC_L_MASK);
2282 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2283 3, 0, 0xffffffff,
2284 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2285 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2286 DESC_W_MASK | DESC_A_MASK);
2287 } else
2288 #endif
2289 {
2290 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2291 3, 0, 0xffffffff,
2292 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2293 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2294 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2295 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2296 3, 0, 0xffffffff,
2297 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2298 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2299 DESC_W_MASK | DESC_A_MASK);
2300 }
2301 env->regs[R_ESP] = env->regs[R_ECX];
2302 env->eip = env->regs[R_EDX];
2303 }
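/*
 * Illustration with a hypothetical IA32_SYSENTER_CS of 0x100: a 32-bit
 * SYSEXIT returns with CS = 0x113 (0x100 + 16, RPL 3) and SS = 0x11b
 * (0x100 + 24, RPL 3); the 64-bit path uses the +32/+40 selectors instead.
 * ECX and EDX supply the new ESP and EIP.
 */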
2305 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2306 {
2307 unsigned int limit;
2308 uint32_t e1, e2, eflags, selector;
2309 int rpl, dpl, cpl, type;
2311 selector = selector1 & 0xffff;
2312 eflags = cpu_cc_compute_all(env, CC_OP);
2313 if ((selector & 0xfffc) == 0) {
2314 goto fail;
2315 }
2316 if (load_segment(env, &e1, &e2, selector) != 0) {
2317 goto fail;
2318 }
2319 rpl = selector & 3;
2320 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2321 cpl = env->hflags & HF_CPL_MASK;
2322 if (e2 & DESC_S_MASK) {
2323 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2324 /* conforming */
2325 } else {
2326 if (dpl < cpl || dpl < rpl) {
2327 goto fail;
2328 }
2329 }
2330 } else {
2331 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2332 switch (type) {
2333 case 1:
2334 case 2:
2335 case 3:
2336 case 9:
2337 case 11:
2338 break;
2339 default:
2340 goto fail;
2341 }
2342 if (dpl < cpl || dpl < rpl) {
2343 fail:
2344 CC_SRC = eflags & ~CC_Z;
2345 return 0;
2346 }
2347 }
2348 limit = get_seg_limit(e1, e2);
2349 CC_SRC = eflags | CC_Z;
2350 return limit;
2351 }
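/*
 * Example of the LSL semantics above: for a visible data segment whose
 * descriptor has a raw limit of 0xfffff and G set, the helper returns the
 * expanded limit 0xffffffff and sets ZF; if any check fails, ZF is cleared
 * and 0 is returned.
 */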
2353 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2354 {
2355 uint32_t e1, e2, eflags, selector;
2356 int rpl, dpl, cpl, type;
2358 selector = selector1 & 0xffff;
2359 eflags = cpu_cc_compute_all(env, CC_OP);
2360 if ((selector & 0xfffc) == 0) {
2361 goto fail;
2362 }
2363 if (load_segment(env, &e1, &e2, selector) != 0) {
2364 goto fail;
2365 }
2366 rpl = selector & 3;
2367 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2368 cpl = env->hflags & HF_CPL_MASK;
2369 if (e2 & DESC_S_MASK) {
2370 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2371 /* conforming */
2372 } else {
2373 if (dpl < cpl || dpl < rpl) {
2374 goto fail;
2375 }
2376 }
2377 } else {
2378 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2379 switch (type) {
2380 case 1:
2381 case 2:
2382 case 3:
2383 case 4:
2384 case 5:
2385 case 9:
2386 case 11:
2387 case 12:
2388 break;
2389 default:
2390 goto fail;
2391 }
2392 if (dpl < cpl || dpl < rpl) {
2393 fail:
2394 CC_SRC = eflags & ~CC_Z;
2395 return 0;
2396 }
2397 }
2398 CC_SRC = eflags | CC_Z;
2399 return e2 & 0x00f0ff00;
2400 }
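/*
 * LAR, as implemented above, returns the descriptor's access-rights image:
 * e2 masked with 0x00f0ff00 keeps the type/S/DPL/P byte (bits 8-15) and the
 * AVL/L/D-B/G flags (bits 20-23), with ZF set on success.
 */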
2402 void helper_verr(CPUX86State *env, target_ulong selector1)
2403 {
2404 uint32_t e1, e2, eflags, selector;
2405 int rpl, dpl, cpl;
2407 selector = selector1 & 0xffff;
2408 eflags = cpu_cc_compute_all(env, CC_OP);
2409 if ((selector & 0xfffc) == 0) {
2410 goto fail;
2411 }
2412 if (load_segment(env, &e1, &e2, selector) != 0) {
2413 goto fail;
2414 }
2415 if (!(e2 & DESC_S_MASK)) {
2416 goto fail;
2417 }
2418 rpl = selector & 3;
2419 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2420 cpl = env->hflags & HF_CPL_MASK;
2421 if (e2 & DESC_CS_MASK) {
2422 if (!(e2 & DESC_R_MASK)) {
2423 goto fail;
2424 }
2425 if (!(e2 & DESC_C_MASK)) {
2426 if (dpl < cpl || dpl < rpl) {
2427 goto fail;
2428 }
2429 }
2430 } else {
2431 if (dpl < cpl || dpl < rpl) {
2432 fail:
2433 CC_SRC = eflags & ~CC_Z;
2434 return;
2435 }
2436 }
2437 CC_SRC = eflags | CC_Z;
2438 }
2440 void helper_verw(CPUX86State *env, target_ulong selector1)
2441 {
2442 uint32_t e1, e2, eflags, selector;
2443 int rpl, dpl, cpl;
2445 selector = selector1 & 0xffff;
2446 eflags = cpu_cc_compute_all(env, CC_OP);
2447 if ((selector & 0xfffc) == 0) {
2448 goto fail;
2449 }
2450 if (load_segment(env, &e1, &e2, selector) != 0) {
2451 goto fail;
2452 }
2453 if (!(e2 & DESC_S_MASK)) {
2454 goto fail;
2455 }
2456 rpl = selector & 3;
2457 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2458 cpl = env->hflags & HF_CPL_MASK;
2459 if (e2 & DESC_CS_MASK) {
2460 goto fail;
2461 } else {
2462 if (dpl < cpl || dpl < rpl) {
2463 goto fail;
2464 }
2465 if (!(e2 & DESC_W_MASK)) {
2466 fail:
2467 CC_SRC = eflags & ~CC_Z;
2468 return;
2469 }
2470 }
2471 CC_SRC = eflags | CC_Z;
2472 }
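/*
 * helper_verr()/helper_verw() above only report through ZF: it is set when
 * the selector references a segment readable (VERR) or writable (VERW) at
 * the current CPL and the selector's RPL, and cleared otherwise; no
 * exception is raised for a failing check.
 */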
2474 #if defined(CONFIG_USER_ONLY)
2475 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2476 {
2477 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2478 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2479 selector &= 0xffff;
2480 cpu_x86_load_seg_cache(env, seg_reg, selector,
2481 (selector << 4), 0xffff,
2482 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2483 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2484 } else {
2485 helper_load_seg(env, seg_reg, selector);
2486 }
2487 }
2488 #endif
2490 /* check if Port I/O is allowed in TSS */
2491 static inline void check_io(CPUX86State *env, int addr, int size)
2492 {
2493 int io_offset, val, mask;
2495 /* TSS must be a valid 32 bit one */
2496 if (!(env->tr.flags & DESC_P_MASK) ||
2497 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2498 env->tr.limit < 103) {
2499 goto fail;
2500 }
2501 io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
2502 io_offset += (addr >> 3);
2503 /* Note: the check needs two bytes */
2504 if ((io_offset + 1) > env->tr.limit) {
2505 goto fail;
2506 }
2507 val = cpu_lduw_kernel(env, env->tr.base + io_offset);
2508 val >>= (addr & 7);
2509 mask = (1 << size) - 1;
2510 /* all bits must be zero to allow the I/O */
2511 if ((val & mask) != 0) {
2512 fail:
2513 raise_exception_err(env, EXCP0D_GPF, 0);
2514 }
2515 }
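/*
 * Worked example for check_io(): a 2-byte access to port 0x3f9 with a
 * hypothetical I/O map base of 0x68 reads the bitmap word at
 * tr.base + 0x68 + (0x3f9 >> 3) = tr.base + 0xe7, shifts it right by
 * (0x3f9 & 7) = 1 and requires the low two bits to be clear; otherwise
 * #GP(0) is raised.
 */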
2517 void helper_check_iob(CPUX86State *env, uint32_t t0)
2518 {
2519 check_io(env, t0, 1);
2520 }
2522 void helper_check_iow(CPUX86State *env, uint32_t t0)
2523 {
2524 check_io(env, t0, 2);
2525 }
2527 void helper_check_iol(CPUX86State *env, uint32_t t0)
2528 {
2529 check_io(env, t0, 4);
2530 }