target-i386/seg_helper.c
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "exec/helper-proto.h"
24 #include "exec/cpu_ldst.h"
26 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(cpu) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(cpu) do { } while (0)
35 #endif
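/* With MEMSUFFIX set to _kernel, each inclusion of cpu_ldst_template.h below
   generates the cpu_ld*_kernel() and cpu_st*_kernel() accessors (1, 2, 4 and
   8 byte variants) used throughout this file; they go through the kernel MMU
   index so descriptor and TSS accesses are performed with supervisor rights. */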
37 #ifndef CONFIG_USER_ONLY
38 #define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
39 #define MEMSUFFIX _kernel
40 #define DATA_SIZE 1
41 #include "exec/cpu_ldst_template.h"
43 #define DATA_SIZE 2
44 #include "exec/cpu_ldst_template.h"
46 #define DATA_SIZE 4
47 #include "exec/cpu_ldst_template.h"
49 #define DATA_SIZE 8
50 #include "exec/cpu_ldst_template.h"
51 #undef CPU_MMU_INDEX
52 #undef MEMSUFFIX
53 #endif
/* return non-zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
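/* The two helpers below decode a segment descriptor: the 20-bit limit is
   scaled to 4K granularity when the G bit is set, and the 32-bit base is
   reassembled from the bits scattered across both descriptor words. */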
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
153 static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
155 uint32_t e1, e2;
156 int rpl, dpl;
158 if ((selector & 0xfffc) != 0) {
159 if (load_segment(env, &e1, &e2, selector) != 0) {
160 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
162 if (!(e2 & DESC_S_MASK)) {
163 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
165 rpl = selector & 3;
166 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
167 if (seg_reg == R_CS) {
168 if (!(e2 & DESC_CS_MASK)) {
169 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
171 if (dpl != rpl) {
172 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
174 } else if (seg_reg == R_SS) {
175 /* SS must be writable data */
176 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
177 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
179 if (dpl != cpl || dpl != rpl) {
180 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
182 } else {
183 /* not readable code */
184 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
185 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
187 /* if data or non conforming code, checks the rights */
188 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
189 if (dpl < cpl || dpl < rpl) {
190 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
194 if (!(e2 & DESC_P_MASK)) {
195 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
197 cpu_x86_load_seg_cache(env, seg_reg, selector,
198 get_seg_base(e1, e2),
199 get_seg_limit(e1, e2),
200 e2);
201 } else {
202 if (seg_reg == R_SS || seg_reg == R_CS) {
203 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
208 #define SWITCH_TSS_JMP 0
209 #define SWITCH_TSS_IRET 1
210 #define SWITCH_TSS_CALL 2
212 /* XXX: restore CPU state in registers (PowerPC case) */
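/* switch_tss() implements a hardware task switch: it reads the new TSS
   (following a task gate if needed), saves the current register state into
   the old TSS, updates TR, CR3 and the descriptor busy bits, then loads EIP,
   EFLAGS, the general registers, LDTR and the segment registers from the
   new TSS. */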
213 static void switch_tss(CPUX86State *env, int tss_selector,
214 uint32_t e1, uint32_t e2, int source,
215 uint32_t next_eip)
217 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
218 target_ulong tss_base;
219 uint32_t new_regs[8], new_segs[6];
220 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
221 uint32_t old_eflags, eflags_mask;
222 SegmentCache *dt;
223 int index;
224 target_ulong ptr;
226 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
227 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
228 source);
230 /* if task gate, we read the TSS segment and we load it */
231 if (type == 5) {
232 if (!(e2 & DESC_P_MASK)) {
233 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
235 tss_selector = e1 >> 16;
236 if (tss_selector & 4) {
237 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
239 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
240 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
242 if (e2 & DESC_S_MASK) {
243 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
245 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
246 if ((type & 7) != 1) {
247 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
251 if (!(e2 & DESC_P_MASK)) {
252 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
255 if (type & 8) {
256 tss_limit_max = 103;
257 } else {
258 tss_limit_max = 43;
260 tss_limit = get_seg_limit(e1, e2);
261 tss_base = get_seg_base(e1, e2);
262 if ((tss_selector & 4) != 0 ||
263 tss_limit < tss_limit_max) {
264 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
266 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
267 if (old_type & 8) {
268 old_tss_limit_max = 103;
269 } else {
270 old_tss_limit_max = 43;
273 /* read all the registers from the new TSS */
274 if (type & 8) {
275 /* 32 bit */
276 new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
277 new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
278 new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
279 for (i = 0; i < 8; i++) {
280 new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
282 for (i = 0; i < 6; i++) {
283 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
285 new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
286 new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
287 } else {
288 /* 16 bit */
289 new_cr3 = 0;
290 new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
291 new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
292 for (i = 0; i < 8; i++) {
293 new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
294 0xffff0000;
296 for (i = 0; i < 4; i++) {
297 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
299 new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
300 new_segs[R_FS] = 0;
301 new_segs[R_GS] = 0;
302 new_trap = 0;
304 /* XXX: avoid a compiler warning, see
305 http://support.amd.com/us/Processor_TechDocs/24593.pdf
306 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
307 (void)new_trap;
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */
314 v1 = cpu_ldub_kernel(env, env->tr.base);
315 v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
316 cpu_stb_kernel(env, env->tr.base, v1);
317 cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);
319 /* clear busy bit (it is restartable) */
320 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
321 target_ulong ptr;
322 uint32_t e2;
324 ptr = env->gdt.base + (env->tr.selector & ~7);
325 e2 = cpu_ldl_kernel(env, ptr + 4);
326 e2 &= ~DESC_TSS_BUSY_MASK;
327 cpu_stl_kernel(env, ptr + 4, e2);
329 old_eflags = cpu_compute_eflags(env);
330 if (source == SWITCH_TSS_IRET) {
331 old_eflags &= ~NT_MASK;
334 /* save the current state in the old TSS */
335 if (type & 8) {
336 /* 32 bit */
337 cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
338 cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
339 cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
340 cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
341 cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
342 cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
343 cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
344 cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
345 cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
346 cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
347 for (i = 0; i < 6; i++) {
348 cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
349 env->segs[i].selector);
351 } else {
352 /* 16 bit */
353 cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
354 cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
355 cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
356 cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
357 cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
358 cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
359 cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
360 cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
361 cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
362 cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
363 for (i = 0; i < 4; i++) {
364 cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
365 env->segs[i].selector);
369 /* now if an exception occurs, it will occurs in the next task
370 context */
372 if (source == SWITCH_TSS_CALL) {
373 cpu_stw_kernel(env, tss_base, env->tr.selector);
374 new_eflags |= NT_MASK;
377 /* set busy bit */
378 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
379 target_ulong ptr;
380 uint32_t e2;
382 ptr = env->gdt.base + (tss_selector & ~7);
383 e2 = cpu_ldl_kernel(env, ptr + 4);
384 e2 |= DESC_TSS_BUSY_MASK;
385 cpu_stl_kernel(env, ptr + 4, e2);
388 /* set the new CPU state */
389 /* from this point, any exception which occurs can give problems */
390 env->cr[0] |= CR0_TS_MASK;
391 env->hflags |= HF_TS_MASK;
392 env->tr.selector = tss_selector;
393 env->tr.base = tss_base;
394 env->tr.limit = tss_limit;
395 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
397 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
398 cpu_x86_update_cr3(env, new_cr3);
401 /* load all registers without an exception, then reload them with
402 possible exception */
403 env->eip = new_eip;
404 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
405 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
406 if (!(type & 8)) {
407 eflags_mask &= 0xffff;
409 cpu_load_eflags(env, new_eflags, eflags_mask);
410 /* XXX: what to do in 16 bit case? */
411 env->regs[R_EAX] = new_regs[0];
412 env->regs[R_ECX] = new_regs[1];
413 env->regs[R_EDX] = new_regs[2];
414 env->regs[R_EBX] = new_regs[3];
415 env->regs[R_ESP] = new_regs[4];
416 env->regs[R_EBP] = new_regs[5];
417 env->regs[R_ESI] = new_regs[6];
418 env->regs[R_EDI] = new_regs[7];
419 if (new_eflags & VM_MASK) {
420 for (i = 0; i < 6; i++) {
421 load_seg_vm(env, i, new_segs[i]);
423 } else {
424 /* first just selectors as the rest may trigger exceptions */
425 for (i = 0; i < 6; i++) {
426 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
430 env->ldt.selector = new_ldt & ~4;
431 env->ldt.base = 0;
432 env->ldt.limit = 0;
433 env->ldt.flags = 0;
435 /* load the LDT */
436 if (new_ldt & 4) {
437 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
440 if ((new_ldt & 0xfffc) != 0) {
441 dt = &env->gdt;
442 index = new_ldt & ~7;
443 if ((index + 7) > dt->limit) {
444 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
446 ptr = dt->base + index;
447 e1 = cpu_ldl_kernel(env, ptr);
448 e2 = cpu_ldl_kernel(env, ptr + 4);
449 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
450 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
452 if (!(e2 & DESC_P_MASK)) {
453 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
455 load_seg_cache_raw_dt(&env->ldt, e1, e2);
458 /* load the segments */
459 if (!(new_eflags & VM_MASK)) {
460 int cpl = new_segs[R_CS] & 3;
461 tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
462 tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
463 tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
464 tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
465 tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
466 tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
469 /* check that env->eip is in the CS segment limits */
470 if (new_eip > env->segs[R_CS].limit) {
471 /* XXX: different exception if CALL? */
472 raise_exception_err(env, EXCP0D_GPF, 0);
475 #ifndef CONFIG_USER_ONLY
476 /* reset local breakpoints */
477 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
478 for (i = 0; i < DR7_MAX_BP; i++) {
479 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
480 !hw_global_breakpoint_enabled(env->dr[7], i)) {
481 hw_breakpoint_remove(env, i);
484 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
486 #endif
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
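/* vectors that push an error code: #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17) */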
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask) \
    do { \
        if ((sp_mask) == 0xffff) { \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
                ((val) & 0xffff); \
        } else if ((sp_mask) == 0xffffffffLL) { \
            env->regs[R_ESP] = (uint32_t)(val); \
        } else { \
            env->regs[R_ESP] = (val); \
        } \
    } while (0)
#else
#define SET_ESP(val, sp_mask) \
    do { \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
            ((val) & (sp_mask)); \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val) \
    do { \
        sp -= 2; \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
    } while (0)

#define PUSHL(ssp, sp, sp_mask, val) \
    do { \
        sp -= 4; \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    } while (0)

#define POPW(ssp, sp, sp_mask, val) \
    do { \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
        sp += 2; \
    } while (0)

#define POPL(ssp, sp, sp_mask, val) \
    do { \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4; \
    } while (0)
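/* The PUSHW/PUSHL/POPW/POPL macros operate on a local copy of the stack
   pointer; callers only commit it back with SET_ESP (or an explicit masked
   assignment) once the whole frame has been accessed, so a fault in the
   middle leaves the architectural ESP unchanged. */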
562 /* protected mode interrupt */
563 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
564 int error_code, unsigned int next_eip,
565 int is_hw)
567 SegmentCache *dt;
568 target_ulong ptr, ssp;
569 int type, dpl, selector, ss_dpl, cpl;
570 int has_error_code, new_stack, shift;
571 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
572 uint32_t old_eip, sp_mask;
573 int vm86 = env->eflags & VM_MASK;
575 has_error_code = 0;
576 if (!is_int && !is_hw) {
577 has_error_code = exception_has_error_code(intno);
579 if (is_int) {
580 old_eip = next_eip;
581 } else {
582 old_eip = env->eip;
585 dt = &env->idt;
586 if (intno * 8 + 7 > dt->limit) {
587 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
589 ptr = dt->base + intno * 8;
590 e1 = cpu_ldl_kernel(env, ptr);
591 e2 = cpu_ldl_kernel(env, ptr + 4);
592 /* check gate type */
593 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
594 switch (type) {
595 case 5: /* task gate */
596 /* must do that check here to return the correct error code */
597 if (!(e2 & DESC_P_MASK)) {
598 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
600 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
601 if (has_error_code) {
602 int type;
603 uint32_t mask;
605 /* push the error code */
606 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
607 shift = type >> 3;
608 if (env->segs[R_SS].flags & DESC_B_MASK) {
609 mask = 0xffffffff;
610 } else {
611 mask = 0xffff;
613 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
614 ssp = env->segs[R_SS].base + esp;
615 if (shift) {
616 cpu_stl_kernel(env, ssp, error_code);
617 } else {
618 cpu_stw_kernel(env, ssp, error_code);
620 SET_ESP(esp, mask);
622 return;
623 case 6: /* 286 interrupt gate */
624 case 7: /* 286 trap gate */
625 case 14: /* 386 interrupt gate */
626 case 15: /* 386 trap gate */
627 break;
628 default:
629 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
630 break;
632 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
633 cpl = env->hflags & HF_CPL_MASK;
634 /* check privilege if software int */
635 if (is_int && dpl < cpl) {
636 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
638 /* check valid bit */
639 if (!(e2 & DESC_P_MASK)) {
640 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
642 selector = e1 >> 16;
643 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
644 if ((selector & 0xfffc) == 0) {
645 raise_exception_err(env, EXCP0D_GPF, 0);
647 if (load_segment(env, &e1, &e2, selector) != 0) {
648 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
650 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
651 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
653 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
654 if (dpl > cpl) {
655 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
657 if (!(e2 & DESC_P_MASK)) {
658 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
660 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
661 /* to inner privilege */
662 get_ss_esp_from_tss(env, &ss, &esp, dpl);
663 if ((ss & 0xfffc) == 0) {
664 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
666 if ((ss & 3) != dpl) {
667 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
669 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
670 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
672 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
673 if (ss_dpl != dpl) {
674 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
676 if (!(ss_e2 & DESC_S_MASK) ||
677 (ss_e2 & DESC_CS_MASK) ||
678 !(ss_e2 & DESC_W_MASK)) {
679 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
681 if (!(ss_e2 & DESC_P_MASK)) {
682 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
684 new_stack = 1;
685 sp_mask = get_sp_mask(ss_e2);
686 ssp = get_seg_base(ss_e1, ss_e2);
687 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
688 /* to same privilege */
689 if (vm86) {
690 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
692 new_stack = 0;
693 sp_mask = get_sp_mask(env->segs[R_SS].flags);
694 ssp = env->segs[R_SS].base;
695 esp = env->regs[R_ESP];
696 dpl = cpl;
697 } else {
698 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
699 new_stack = 0; /* avoid warning */
700 sp_mask = 0; /* avoid warning */
701 ssp = 0; /* avoid warning */
702 esp = 0; /* avoid warning */
705 shift = type >> 3;
707 #if 0
708 /* XXX: check that enough room is available */
709 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
710 if (vm86) {
711 push_size += 8;
713 push_size <<= shift;
714 #endif
715 if (shift == 1) {
716 if (new_stack) {
717 if (vm86) {
718 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
719 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
720 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
721 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
723 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
724 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
726 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
727 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
728 PUSHL(ssp, esp, sp_mask, old_eip);
729 if (has_error_code) {
730 PUSHL(ssp, esp, sp_mask, error_code);
732 } else {
733 if (new_stack) {
734 if (vm86) {
735 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
736 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
737 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
738 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
740 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
741 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
743 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
744 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
745 PUSHW(ssp, esp, sp_mask, old_eip);
746 if (has_error_code) {
747 PUSHW(ssp, esp, sp_mask, error_code);
751 /* interrupt gate clear IF mask */
752 if ((type & 1) == 0) {
753 env->eflags &= ~IF_MASK;
755 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
757 if (new_stack) {
758 if (vm86) {
759 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
760 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
761 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
762 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
764 ss = (ss & ~3) | dpl;
765 cpu_x86_load_seg_cache(env, R_SS, ss,
766 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
768 SET_ESP(esp, sp_mask);
770 selector = (selector & ~3) | dpl;
771 cpu_x86_load_seg_cache(env, R_CS, selector,
772 get_seg_base(e1, e2),
773 get_seg_limit(e1, e2),
774 e2);
775 env->eip = offset;
#ifdef TARGET_X86_64

#define PUSHQ(sp, val) \
    do { \
        sp -= 8; \
        cpu_stq_kernel(env, sp, (val)); \
    } while (0)

#define POPQ(sp, val) \
    do { \
        val = cpu_ldq_kernel(env, sp); \
        sp += 8; \
    } while (0)
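/* fetch RSPn (level 0-2) or, via level = ist + 3, ISTn from the 64-bit TSS;
   the entries are 8 bytes each starting at offset 4 */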
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
812 /* 64 bit interrupt */
813 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
814 int error_code, target_ulong next_eip, int is_hw)
816 SegmentCache *dt;
817 target_ulong ptr;
818 int type, dpl, selector, cpl, ist;
819 int has_error_code, new_stack;
820 uint32_t e1, e2, e3, ss;
821 target_ulong old_eip, esp, offset;
823 has_error_code = 0;
824 if (!is_int && !is_hw) {
825 has_error_code = exception_has_error_code(intno);
827 if (is_int) {
828 old_eip = next_eip;
829 } else {
830 old_eip = env->eip;
833 dt = &env->idt;
834 if (intno * 16 + 15 > dt->limit) {
835 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
837 ptr = dt->base + intno * 16;
838 e1 = cpu_ldl_kernel(env, ptr);
839 e2 = cpu_ldl_kernel(env, ptr + 4);
840 e3 = cpu_ldl_kernel(env, ptr + 8);
841 /* check gate type */
842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
843 switch (type) {
844 case 14: /* 386 interrupt gate */
845 case 15: /* 386 trap gate */
846 break;
847 default:
848 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
849 break;
851 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
852 cpl = env->hflags & HF_CPL_MASK;
853 /* check privilege if software int */
854 if (is_int && dpl < cpl) {
855 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
857 /* check valid bit */
858 if (!(e2 & DESC_P_MASK)) {
859 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
861 selector = e1 >> 16;
862 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
863 ist = e2 & 7;
864 if ((selector & 0xfffc) == 0) {
865 raise_exception_err(env, EXCP0D_GPF, 0);
868 if (load_segment(env, &e1, &e2, selector) != 0) {
869 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
871 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
872 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
874 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
875 if (dpl > cpl) {
876 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
878 if (!(e2 & DESC_P_MASK)) {
879 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
881 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
882 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
884 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
885 /* to inner privilege */
886 new_stack = 1;
887 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
888 ss = 0;
889 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
890 /* to same privilege */
891 if (env->eflags & VM_MASK) {
892 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
894 new_stack = 0;
895 esp = env->regs[R_ESP];
896 dpl = cpl;
897 } else {
898 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
899 new_stack = 0; /* avoid warning */
900 esp = 0; /* avoid warning */
902 esp &= ~0xfLL; /* align stack */
904 PUSHQ(esp, env->segs[R_SS].selector);
905 PUSHQ(esp, env->regs[R_ESP]);
906 PUSHQ(esp, cpu_compute_eflags(env));
907 PUSHQ(esp, env->segs[R_CS].selector);
908 PUSHQ(esp, old_eip);
909 if (has_error_code) {
910 PUSHQ(esp, error_code);
913 /* interrupt gate clear IF mask */
914 if ((type & 1) == 0) {
915 env->eflags &= ~IF_MASK;
917 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
919 if (new_stack) {
920 ss = 0 | dpl;
921 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
923 env->regs[R_ESP] = esp;
925 selector = (selector & ~3) | dpl;
926 cpu_x86_load_seg_cache(env, R_CS, selector,
927 get_seg_base(e1, e2),
928 get_seg_limit(e1, e2),
929 e2);
930 env->eip = offset;
932 #endif
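/* SYSCALL: in long mode, CS and SS are derived from MSR_STAR[47:32], the
   return RIP is saved in RCX and RFLAGS in R11 (then masked by MSR_FMASK),
   and execution continues at LSTAR (64-bit code) or CSTAR (compatibility
   mode); legacy mode instead saves EIP in ECX and jumps to the low 32 bits
   of STAR. */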
934 #ifdef TARGET_X86_64
935 #if defined(CONFIG_USER_ONLY)
936 void helper_syscall(CPUX86State *env, int next_eip_addend)
938 CPUState *cs = CPU(x86_env_get_cpu(env));
940 cs->exception_index = EXCP_SYSCALL;
941 env->exception_next_eip = env->eip + next_eip_addend;
942 cpu_loop_exit(cs);
944 #else
945 void helper_syscall(CPUX86State *env, int next_eip_addend)
947 int selector;
949 if (!(env->efer & MSR_EFER_SCE)) {
950 raise_exception_err(env, EXCP06_ILLOP, 0);
952 selector = (env->star >> 32) & 0xffff;
953 if (env->hflags & HF_LMA_MASK) {
954 int code64;
956 env->regs[R_ECX] = env->eip + next_eip_addend;
957 env->regs[11] = cpu_compute_eflags(env);
959 code64 = env->hflags & HF_CS64_MASK;
961 env->eflags &= ~env->fmask;
962 cpu_load_eflags(env, env->eflags, 0);
963 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
964 0, 0xffffffff,
965 DESC_G_MASK | DESC_P_MASK |
966 DESC_S_MASK |
967 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
968 DESC_L_MASK);
969 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
970 0, 0xffffffff,
971 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
972 DESC_S_MASK |
973 DESC_W_MASK | DESC_A_MASK);
974 if (code64) {
975 env->eip = env->lstar;
976 } else {
977 env->eip = env->cstar;
979 } else {
980 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
982 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
983 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
984 0, 0xffffffff,
985 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
986 DESC_S_MASK |
987 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
988 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
989 0, 0xffffffff,
990 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
991 DESC_S_MASK |
992 DESC_W_MASK | DESC_A_MASK);
993 env->eip = (uint32_t)env->star;
996 #endif
997 #endif
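/* SYSRET: only legal from CPL 0 with EFER.SCE set; the CPL 3 selectors are
   derived from MSR_STAR[63:48] (+16 for a 64-bit return target, +8 for SS)
   and, in long mode, RFLAGS is restored from R11. */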
999 #ifdef TARGET_X86_64
1000 void helper_sysret(CPUX86State *env, int dflag)
1002 int cpl, selector;
1004 if (!(env->efer & MSR_EFER_SCE)) {
1005 raise_exception_err(env, EXCP06_ILLOP, 0);
1007 cpl = env->hflags & HF_CPL_MASK;
1008 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1009 raise_exception_err(env, EXCP0D_GPF, 0);
1011 selector = (env->star >> 48) & 0xffff;
1012 if (env->hflags & HF_LMA_MASK) {
1013 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1014 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1015 NT_MASK);
1016 if (dflag == 2) {
1017 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1018 0, 0xffffffff,
1019 DESC_G_MASK | DESC_P_MASK |
1020 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1021 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1022 DESC_L_MASK);
1023 env->eip = env->regs[R_ECX];
1024 } else {
1025 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1026 0, 0xffffffff,
1027 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1028 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1029 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1030 env->eip = (uint32_t)env->regs[R_ECX];
1032 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1033 0, 0xffffffff,
1034 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1035 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1036 DESC_W_MASK | DESC_A_MASK);
1037 } else {
1038 env->eflags |= IF_MASK;
1039 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1040 0, 0xffffffff,
1041 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1042 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1043 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1044 env->eip = (uint32_t)env->regs[R_ECX];
1045 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1046 0, 0xffffffff,
1047 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1048 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1049 DESC_W_MASK | DESC_A_MASK);
1052 #endif
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1093 #if defined(CONFIG_USER_ONLY)
1094 /* fake user mode interrupt */
1095 static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1096 int error_code, target_ulong next_eip)
1098 SegmentCache *dt;
1099 target_ulong ptr;
1100 int dpl, cpl, shift;
1101 uint32_t e2;
1103 dt = &env->idt;
1104 if (env->hflags & HF_LMA_MASK) {
1105 shift = 4;
1106 } else {
1107 shift = 3;
1109 ptr = dt->base + (intno << shift);
1110 e2 = cpu_ldl_kernel(env, ptr + 4);
1112 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1113 cpl = env->hflags & HF_CPL_MASK;
1114 /* check privilege if software int */
1115 if (is_int && dpl < cpl) {
1116 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1119 /* Since we emulate only user space, we cannot do more than
1120 exiting the emulation with the suitable exception and error
1121 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1122 if (is_int || intno == EXCP_SYSCALL) {
1123 env->eip = next_eip;
1127 #else
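/* when running inside an SVM guest, record the event currently being
   delivered in the VMCB EVENTINJ field (unless one is already pending) so
   the host can see or re-inject it if the delivery is intercepted */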
1129 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1130 int error_code, int is_hw, int rm)
1132 CPUState *cs = CPU(x86_env_get_cpu(env));
1133 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1134 control.event_inj));
1136 if (!(event_inj & SVM_EVTINJ_VALID)) {
1137 int type;
1139 if (is_int) {
1140 type = SVM_EVTINJ_TYPE_SOFT;
1141 } else {
1142 type = SVM_EVTINJ_TYPE_EXEPT;
1144 event_inj = intno | type | SVM_EVTINJ_VALID;
1145 if (!rm && exception_has_error_code(intno)) {
1146 event_inj |= SVM_EVTINJ_VALID_ERR;
1147 stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1148 control.event_inj_err),
1149 error_code);
1151 stl_phys(cs->as,
1152 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1153 event_inj);
1156 #endif
1159 * Begin execution of an interruption. is_int is TRUE if coming from
1160 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1161 * instruction. It is only relevant if is_int is TRUE.
1163 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1164 int error_code, target_ulong next_eip, int is_hw)
1166 CPUX86State *env = &cpu->env;
1168 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1169 if ((env->cr[0] & CR0_PE_MASK)) {
1170 static int count;
1172 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1173 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1174 count, intno, error_code, is_int,
1175 env->hflags & HF_CPL_MASK,
1176 env->segs[R_CS].selector, env->eip,
1177 (int)env->segs[R_CS].base + env->eip,
1178 env->segs[R_SS].selector, env->regs[R_ESP]);
1179 if (intno == 0x0e) {
1180 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1181 } else {
1182 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1184 qemu_log("\n");
1185 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1186 #if 0
1188 int i;
1189 target_ulong ptr;
1191 qemu_log(" code=");
1192 ptr = env->segs[R_CS].base + env->eip;
1193 for (i = 0; i < 16; i++) {
1194 qemu_log(" %02x", ldub(ptr + i));
1196 qemu_log("\n");
1198 #endif
1199 count++;
1202 if (env->cr[0] & CR0_PE_MASK) {
1203 #if !defined(CONFIG_USER_ONLY)
1204 if (env->hflags & HF_SVMI_MASK) {
1205 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1207 #endif
1208 #ifdef TARGET_X86_64
1209 if (env->hflags & HF_LMA_MASK) {
1210 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1211 } else
1212 #endif
1214 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1215 is_hw);
1217 } else {
1218 #if !defined(CONFIG_USER_ONLY)
1219 if (env->hflags & HF_SVMI_MASK) {
1220 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1222 #endif
1223 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1226 #if !defined(CONFIG_USER_ONLY)
1227 if (env->hflags & HF_SVMI_MASK) {
1228 CPUState *cs = CPU(cpu);
1229 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
1230 offsetof(struct vmcb,
1231 control.event_inj));
1233 stl_phys(cs->as,
1234 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1235 event_inj & ~SVM_EVTINJ_VALID);
1237 #endif
1240 void x86_cpu_do_interrupt(CPUState *cs)
1242 X86CPU *cpu = X86_CPU(cs);
1243 CPUX86State *env = &cpu->env;
1245 #if defined(CONFIG_USER_ONLY)
1246 /* if user mode only, we simulate a fake exception
1247 which will be handled outside the cpu execution
1248 loop */
1249 do_interrupt_user(env, cs->exception_index,
1250 env->exception_is_int,
1251 env->error_code,
1252 env->exception_next_eip);
1253 /* successfully delivered */
1254 env->old_exception = -1;
1255 #else
1256 /* simulate a real cpu exception. On i386, it can
1257 trigger new exceptions, but we do not handle
1258 double or triple faults yet. */
1259 do_interrupt_all(cpu, cs->exception_index,
1260 env->exception_is_int,
1261 env->error_code,
1262 env->exception_next_eip, 0);
1263 /* successfully delivered */
1264 env->old_exception = -1;
1265 #endif
1268 void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
1270 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
1273 bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1275 X86CPU *cpu = X86_CPU(cs);
1276 CPUX86State *env = &cpu->env;
1277 bool ret = false;
1279 #if !defined(CONFIG_USER_ONLY)
1280 if (interrupt_request & CPU_INTERRUPT_POLL) {
1281 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1282 apic_poll_irq(cpu->apic_state);
1284 #endif
1285 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1286 do_cpu_sipi(cpu);
1287 } else if (env->hflags2 & HF2_GIF_MASK) {
1288 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1289 !(env->hflags & HF_SMM_MASK)) {
1290 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1291 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1292 do_smm_enter(cpu);
1293 ret = true;
1294 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1295 !(env->hflags2 & HF2_NMI_MASK)) {
1296 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1297 env->hflags2 |= HF2_NMI_MASK;
1298 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1299 ret = true;
1300 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1301 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1302 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1303 ret = true;
1304 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1305 (((env->hflags2 & HF2_VINTR_MASK) &&
1306 (env->hflags2 & HF2_HIF_MASK)) ||
1307 (!(env->hflags2 & HF2_VINTR_MASK) &&
1308 (env->eflags & IF_MASK &&
1309 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1310 int intno;
1311 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1312 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1313 CPU_INTERRUPT_VIRQ);
1314 intno = cpu_get_pic_interrupt(env);
1315 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1316 "Servicing hardware INT=0x%02x\n", intno);
1317 do_interrupt_x86_hardirq(env, intno, 1);
1318 /* ensure that no TB jump will be modified as
1319 the program flow was changed */
1320 ret = true;
1321 #if !defined(CONFIG_USER_ONLY)
1322 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1323 (env->eflags & IF_MASK) &&
1324 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1325 int intno;
1326 /* FIXME: this should respect TPR */
1327 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
1328 intno = ldl_phys(cs->as, env->vm_vmcb
1329 + offsetof(struct vmcb, control.int_vector));
1330 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1331 "Servicing virtual hardware INT=0x%02x\n", intno);
1332 do_interrupt_x86_hardirq(env, intno, 1);
1333 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1334 ret = true;
1335 #endif
        }
    }

    return ret;
}
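/* helper_enter_level() implements the frame-copy part of ENTER with a
   non-zero nesting level: it copies level-1 previous frame pointers from
   the old frame and finally pushes the new frame pointer t1. */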
1342 void helper_enter_level(CPUX86State *env, int level, int data32,
1343 target_ulong t1)
1345 target_ulong ssp;
1346 uint32_t esp_mask, esp, ebp;
1348 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1349 ssp = env->segs[R_SS].base;
1350 ebp = env->regs[R_EBP];
1351 esp = env->regs[R_ESP];
1352 if (data32) {
1353 /* 32 bit */
1354 esp -= 4;
1355 while (--level) {
1356 esp -= 4;
1357 ebp -= 4;
1358 cpu_stl_data(env, ssp + (esp & esp_mask),
1359 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
1361 esp -= 4;
1362 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
1363 } else {
1364 /* 16 bit */
1365 esp -= 2;
1366 while (--level) {
1367 esp -= 2;
1368 ebp -= 2;
1369 cpu_stw_data(env, ssp + (esp & esp_mask),
1370 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
1372 esp -= 2;
1373 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
1377 #ifdef TARGET_X86_64
1378 void helper_enter64_level(CPUX86State *env, int level, int data64,
1379 target_ulong t1)
1381 target_ulong esp, ebp;
1383 ebp = env->regs[R_EBP];
1384 esp = env->regs[R_ESP];
1386 if (data64) {
1387 /* 64 bit */
1388 esp -= 8;
1389 while (--level) {
1390 esp -= 8;
1391 ebp -= 8;
1392 cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
1394 esp -= 8;
1395 cpu_stq_data(env, esp, t1);
1396 } else {
1397 /* 16 bit */
1398 esp -= 2;
1399 while (--level) {
1400 esp -= 2;
1401 ebp -= 2;
1402 cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
1404 esp -= 2;
1405 cpu_stw_data(env, esp, t1);
1408 #endif
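/* LLDT: the selector must reference a GDT descriptor of type 2 (LDT);
   in long mode the descriptor is 16 bytes and supplies a 64-bit base. */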
1410 void helper_lldt(CPUX86State *env, int selector)
1412 SegmentCache *dt;
1413 uint32_t e1, e2;
1414 int index, entry_limit;
1415 target_ulong ptr;
1417 selector &= 0xffff;
1418 if ((selector & 0xfffc) == 0) {
1419 /* XXX: NULL selector case: invalid LDT */
1420 env->ldt.base = 0;
1421 env->ldt.limit = 0;
1422 } else {
1423 if (selector & 0x4) {
1424 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1426 dt = &env->gdt;
1427 index = selector & ~7;
1428 #ifdef TARGET_X86_64
1429 if (env->hflags & HF_LMA_MASK) {
1430 entry_limit = 15;
1431 } else
1432 #endif
1434 entry_limit = 7;
1436 if ((index + entry_limit) > dt->limit) {
1437 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1439 ptr = dt->base + index;
1440 e1 = cpu_ldl_kernel(env, ptr);
1441 e2 = cpu_ldl_kernel(env, ptr + 4);
1442 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1443 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1445 if (!(e2 & DESC_P_MASK)) {
1446 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1448 #ifdef TARGET_X86_64
1449 if (env->hflags & HF_LMA_MASK) {
1450 uint32_t e3;
1452 e3 = cpu_ldl_kernel(env, ptr + 8);
1453 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1454 env->ldt.base |= (target_ulong)e3 << 32;
1455 } else
1456 #endif
1458 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1461 env->ldt.selector = selector;
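/* LTR: the selector must reference an available TSS descriptor (type 1 or 9)
   in the GDT; the descriptor is marked busy once loaded. */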
1464 void helper_ltr(CPUX86State *env, int selector)
1466 SegmentCache *dt;
1467 uint32_t e1, e2;
1468 int index, type, entry_limit;
1469 target_ulong ptr;
1471 selector &= 0xffff;
1472 if ((selector & 0xfffc) == 0) {
1473 /* NULL selector case: invalid TR */
1474 env->tr.base = 0;
1475 env->tr.limit = 0;
1476 env->tr.flags = 0;
1477 } else {
1478 if (selector & 0x4) {
1479 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1481 dt = &env->gdt;
1482 index = selector & ~7;
1483 #ifdef TARGET_X86_64
1484 if (env->hflags & HF_LMA_MASK) {
1485 entry_limit = 15;
1486 } else
1487 #endif
1489 entry_limit = 7;
1491 if ((index + entry_limit) > dt->limit) {
1492 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1494 ptr = dt->base + index;
1495 e1 = cpu_ldl_kernel(env, ptr);
1496 e2 = cpu_ldl_kernel(env, ptr + 4);
1497 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1498 if ((e2 & DESC_S_MASK) ||
1499 (type != 1 && type != 9)) {
1500 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1502 if (!(e2 & DESC_P_MASK)) {
1503 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1505 #ifdef TARGET_X86_64
1506 if (env->hflags & HF_LMA_MASK) {
1507 uint32_t e3, e4;
1509 e3 = cpu_ldl_kernel(env, ptr + 8);
1510 e4 = cpu_ldl_kernel(env, ptr + 12);
1511 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1512 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1514 load_seg_cache_raw_dt(&env->tr, e1, e2);
1515 env->tr.base |= (target_ulong)e3 << 32;
1516 } else
1517 #endif
1519 load_seg_cache_raw_dt(&env->tr, e1, e2);
1521 e2 |= DESC_TSS_BUSY_MASK;
1522 cpu_stl_kernel(env, ptr + 4, e2);
1524 env->tr.selector = selector;
1527 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1528 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1530 uint32_t e1, e2;
1531 int cpl, dpl, rpl;
1532 SegmentCache *dt;
1533 int index;
1534 target_ulong ptr;
1536 selector &= 0xffff;
1537 cpl = env->hflags & HF_CPL_MASK;
1538 if ((selector & 0xfffc) == 0) {
1539 /* null selector case */
1540 if (seg_reg == R_SS
1541 #ifdef TARGET_X86_64
1542 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1543 #endif
1545 raise_exception_err(env, EXCP0D_GPF, 0);
1547 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1548 } else {
1550 if (selector & 0x4) {
1551 dt = &env->ldt;
1552 } else {
1553 dt = &env->gdt;
1555 index = selector & ~7;
1556 if ((index + 7) > dt->limit) {
1557 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1559 ptr = dt->base + index;
1560 e1 = cpu_ldl_kernel(env, ptr);
1561 e2 = cpu_ldl_kernel(env, ptr + 4);
1563 if (!(e2 & DESC_S_MASK)) {
1564 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1566 rpl = selector & 3;
1567 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1568 if (seg_reg == R_SS) {
1569 /* must be writable segment */
1570 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1571 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1573 if (rpl != cpl || dpl != cpl) {
1574 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1576 } else {
1577 /* must be readable segment */
1578 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1579 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1582 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1583 /* if not conforming code, test rights */
1584 if (dpl < cpl || dpl < rpl) {
1585 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1590 if (!(e2 & DESC_P_MASK)) {
1591 if (seg_reg == R_SS) {
1592 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1593 } else {
1594 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1598 /* set the access bit if not already set */
1599 if (!(e2 & DESC_A_MASK)) {
1600 e2 |= DESC_A_MASK;
1601 cpu_stl_kernel(env, ptr + 4, e2);
1604 cpu_x86_load_seg_cache(env, seg_reg, selector,
1605 get_seg_base(e1, e2),
1606 get_seg_limit(e1, e2),
1607 e2);
1608 #if 0
1609 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1610 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1611 #endif
1615 /* protected mode jump */
1616 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1617 int next_eip_addend)
1619 int gate_cs, type;
1620 uint32_t e1, e2, cpl, dpl, rpl, limit;
1621 target_ulong next_eip;
1623 if ((new_cs & 0xfffc) == 0) {
1624 raise_exception_err(env, EXCP0D_GPF, 0);
1626 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1627 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1629 cpl = env->hflags & HF_CPL_MASK;
1630 if (e2 & DESC_S_MASK) {
1631 if (!(e2 & DESC_CS_MASK)) {
1632 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1634 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1635 if (e2 & DESC_C_MASK) {
1636 /* conforming code segment */
1637 if (dpl > cpl) {
1638 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1640 } else {
1641 /* non conforming code segment */
1642 rpl = new_cs & 3;
1643 if (rpl > cpl) {
1644 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1646 if (dpl != cpl) {
1647 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1650 if (!(e2 & DESC_P_MASK)) {
1651 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1653 limit = get_seg_limit(e1, e2);
1654 if (new_eip > limit &&
1655 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1656 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1658 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1659 get_seg_base(e1, e2), limit, e2);
1660 env->eip = new_eip;
1661 } else {
1662 /* jump to call or task gate */
1663 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1664 rpl = new_cs & 3;
1665 cpl = env->hflags & HF_CPL_MASK;
1666 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1667 switch (type) {
1668 case 1: /* 286 TSS */
1669 case 9: /* 386 TSS */
1670 case 5: /* task gate */
1671 if (dpl < cpl || dpl < rpl) {
1672 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1674 next_eip = env->eip + next_eip_addend;
1675 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1676 break;
1677 case 4: /* 286 call gate */
1678 case 12: /* 386 call gate */
1679 if ((dpl < cpl) || (dpl < rpl)) {
1680 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1682 if (!(e2 & DESC_P_MASK)) {
1683 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1685 gate_cs = e1 >> 16;
1686 new_eip = (e1 & 0xffff);
1687 if (type == 12) {
1688 new_eip |= (e2 & 0xffff0000);
1690 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
1691 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1693 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1694 /* must be code segment */
1695 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1696 (DESC_S_MASK | DESC_CS_MASK))) {
1697 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1699 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1700 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1701 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1703 if (!(e2 & DESC_P_MASK)) {
1704 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1706 limit = get_seg_limit(e1, e2);
1707 if (new_eip > limit) {
1708 raise_exception_err(env, EXCP0D_GPF, 0);
1710 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1711 get_seg_base(e1, e2), limit, e2);
1712 env->eip = new_eip;
1713 break;
1714 default:
1715 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1716 break;
1721 /* real mode call */
1722 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1723 int shift, int next_eip)
1725 int new_eip;
1726 uint32_t esp, esp_mask;
1727 target_ulong ssp;
1729 new_eip = new_eip1;
1730 esp = env->regs[R_ESP];
1731 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1732 ssp = env->segs[R_SS].base;
1733 if (shift) {
1734 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1735 PUSHL(ssp, esp, esp_mask, next_eip);
1736 } else {
1737 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1738 PUSHW(ssp, esp, esp_mask, next_eip);
1741 SET_ESP(esp, esp_mask);
1742 env->eip = new_eip;
1743 env->segs[R_CS].selector = new_cs;
1744 env->segs[R_CS].base = (new_cs << 4);
1747 /* protected mode call */
1748 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1749 int shift, int next_eip_addend)
1751 int new_stack, i;
1752 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1753 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1754 uint32_t val, limit, old_sp_mask;
1755 target_ulong ssp, old_ssp, next_eip;
1757 next_eip = env->eip + next_eip_addend;
1758 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1759 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1760 if ((new_cs & 0xfffc) == 0) {
1761 raise_exception_err(env, EXCP0D_GPF, 0);
1763 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1764 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1766 cpl = env->hflags & HF_CPL_MASK;
1767 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1768 if (e2 & DESC_S_MASK) {
1769 if (!(e2 & DESC_CS_MASK)) {
1770 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1772 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1773 if (e2 & DESC_C_MASK) {
1774 /* conforming code segment */
1775 if (dpl > cpl) {
1776 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1778 } else {
1779 /* non conforming code segment */
1780 rpl = new_cs & 3;
1781 if (rpl > cpl) {
1782 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1784 if (dpl != cpl) {
1785 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1788 if (!(e2 & DESC_P_MASK)) {
1789 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1792 #ifdef TARGET_X86_64
1793 /* XXX: check 16/32 bit cases in long mode */
1794 if (shift == 2) {
1795 target_ulong rsp;
1797 /* 64 bit case */
1798 rsp = env->regs[R_ESP];
1799 PUSHQ(rsp, env->segs[R_CS].selector);
1800 PUSHQ(rsp, next_eip);
1801 /* from this point, not restartable */
1802 env->regs[R_ESP] = rsp;
1803 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1804 get_seg_base(e1, e2),
1805 get_seg_limit(e1, e2), e2);
1806 env->eip = new_eip;
1807 } else
1808 #endif
1810 sp = env->regs[R_ESP];
1811 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1812 ssp = env->segs[R_SS].base;
1813 if (shift) {
1814 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1815 PUSHL(ssp, sp, sp_mask, next_eip);
1816 } else {
1817 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1818 PUSHW(ssp, sp, sp_mask, next_eip);
1821 limit = get_seg_limit(e1, e2);
1822 if (new_eip > limit) {
1823 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1825 /* from this point, not restartable */
1826 SET_ESP(sp, sp_mask);
1827 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1828 get_seg_base(e1, e2), limit, e2);
1829 env->eip = new_eip;
1831 } else {
1832 /* check gate type */
1833 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1834 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1835 rpl = new_cs & 3;
1836 switch (type) {
1837 case 1: /* available 286 TSS */
1838 case 9: /* available 386 TSS */
1839 case 5: /* task gate */
1840 if (dpl < cpl || dpl < rpl) {
1841 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1843 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1844 return;
1845 case 4: /* 286 call gate */
1846 case 12: /* 386 call gate */
1847 break;
1848 default:
1849 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1850 break;
1852 shift = type >> 3;
1854 if (dpl < cpl || dpl < rpl) {
1855 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1857 /* check valid bit */
1858 if (!(e2 & DESC_P_MASK)) {
1859 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1861 selector = e1 >> 16;
1862 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1863 param_count = e2 & 0x1f;
1864 if ((selector & 0xfffc) == 0) {
1865 raise_exception_err(env, EXCP0D_GPF, 0);
1868 if (load_segment(env, &e1, &e2, selector) != 0) {
1869 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1871 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1872 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1874 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1875 if (dpl > cpl) {
1876 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1878 if (!(e2 & DESC_P_MASK)) {
1879 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1882 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1883 /* to inner privilege */
1884 get_ss_esp_from_tss(env, &ss, &sp, dpl);
1885 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1886 TARGET_FMT_lx "\n", ss, sp, param_count,
1887 env->regs[R_ESP]);
1888 if ((ss & 0xfffc) == 0) {
1889 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1891 if ((ss & 3) != dpl) {
1892 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1894 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
1895 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1897 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1898 if (ss_dpl != dpl) {
1899 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1901 if (!(ss_e2 & DESC_S_MASK) ||
1902 (ss_e2 & DESC_CS_MASK) ||
1903 !(ss_e2 & DESC_W_MASK)) {
1904 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1906 if (!(ss_e2 & DESC_P_MASK)) {
1907 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1910 /* push_size = ((param_count * 2) + 8) << shift; */
1912 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1913 old_ssp = env->segs[R_SS].base;
1915 sp_mask = get_sp_mask(ss_e2);
1916 ssp = get_seg_base(ss_e1, ss_e2);
1917 if (shift) {
1918 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1919 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
1920 for (i = param_count - 1; i >= 0; i--) {
1921 val = cpu_ldl_kernel(env, old_ssp +
1922 ((env->regs[R_ESP] + i * 4) &
1923 old_sp_mask));
1924 PUSHL(ssp, sp, sp_mask, val);
1926 } else {
1927 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1928 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
1929 for (i = param_count - 1; i >= 0; i--) {
1930 val = cpu_lduw_kernel(env, old_ssp +
1931 ((env->regs[R_ESP] + i * 2) &
1932 old_sp_mask));
1933 PUSHW(ssp, sp, sp_mask, val);
1936 new_stack = 1;
1937 } else {
1938 /* to same privilege */
1939 sp = env->regs[R_ESP];
1940 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1941 ssp = env->segs[R_SS].base;
1942 /* push_size = (4 << shift); */
1943 new_stack = 0;
1946 if (shift) {
1947 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1948 PUSHL(ssp, sp, sp_mask, next_eip);
1949 } else {
1950 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1951 PUSHW(ssp, sp, sp_mask, next_eip);
1954 /* from this point, not restartable */
1956 if (new_stack) {
1957 ss = (ss & ~3) | dpl;
1958 cpu_x86_load_seg_cache(env, R_SS, ss,
1959 ssp,
1960 get_seg_limit(ss_e1, ss_e2),
1961 ss_e2);
1964 selector = (selector & ~3) | dpl;
1965 cpu_x86_load_seg_cache(env, R_CS, selector,
1966 get_seg_base(e1, e2),
1967 get_seg_limit(e1, e2),
1968 e2);
1969 SET_ESP(sp, sp_mask);
1970 env->eip = offset;
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
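/*
 * When returning to an outer privilege level, data segment registers
 * that are no longer accessible at the new CPL must not keep their
 * cached descriptors: validate_seg() below clears the cache for a
 * data or non-conforming code segment whose DPL is below the new CPL.
 */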
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
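/*
 * Stack frame consumed by the protected-mode return below (lowest
 * address first): EIP, CS, then EFLAGS for IRET, and additionally
 * ESP and SS when the return goes to an outer privilege level.  Each
 * slot is 2, 4 or 8 bytes wide according to 'shift'.
 */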
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
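/*
 * IRET in protected mode: if the NT flag is set the return is a task
 * return through the back-link selector stored at offset 0 of the
 * current TSS (which must name a busy TSS in the GDT); otherwise it
 * is a normal protected-mode return handled by helper_ret_protected().
 */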
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
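/*
 * SYSENTER: #GP(0) if the SYSENTER_CS MSR is zero.  CS is loaded as a
 * flat ring-0 code segment from SYSENTER_CS and SS as a flat ring-0
 * stack segment from SYSENTER_CS + 8; VM, IF and RF are cleared and
 * execution continues at SYSENTER_EIP with SYSENTER_ESP.
 */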
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
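/*
 * SYSEXIT: only valid at CPL 0 with a non-zero SYSENTER_CS.  The
 * return is to ring 3 with CS = SYSENTER_CS + 16 and SS =
 * SYSENTER_CS + 24 (or +32 and +40 for a 64-bit return), ESP taken
 * from ECX and EIP from EDX.
 */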
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
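/*
 * LSL: on success the segment limit (with page granularity already
 * expanded by get_seg_limit()) is returned and ZF is set via CC_SRC;
 * selectors that are null, not visible at the current CPL/RPL, or of
 * a system type that has no limit clear ZF and return 0.
 */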
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
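/*
 * LAR: like LSL but returns the access-rights bytes of the descriptor
 * (e2 masked with 0x00f0ff00) and accepts a wider range of system
 * descriptor types, including call gates and task gates.
 */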
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
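/*
 * VERR: ZF is set if the selected segment is readable at the current
 * CPL/RPL: code segments must have the readable bit, and
 * non-conforming segments additionally need DPL >= CPL and DPL >= RPL.
 */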
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
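/*
 * VERW: ZF is set only for writable data segments that are visible at
 * the current CPL/RPL; code segments always fail the check.
 */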
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
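/*
 * For user-mode emulation, segment loads in real or vm86 mode simply
 * fill the cache with base = selector << 4; in protected mode the
 * full descriptor checks of helper_load_seg() apply.
 */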
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
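/*
 * I/O permission check: the bitmap starts at the 16-bit offset stored
 * at byte 0x66 of the (32-bit) TSS.  Port 'addr' accessed with 'size'
 * bytes is allowed only if the 'size' bits starting at bit (addr & 7)
 * of byte (bitmap base + addr / 8) are all zero.  For example, a one
 * byte access to port 0x3f8 tests bit 0 of byte bitmap base + 0x7f.
 */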
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel(env, env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
}

void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1);
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2);
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4);
}