/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifndef CONFIG_USER_ONLY
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
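
/* Load one segment register during a hardware task switch; any problem with
   the selector or descriptor raises #TS with the selector as error code,
   except for a non-present segment, which raises #NP. */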
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
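/* Hardware task switch to the TSS described by (tss_selector, e1, e2).
   The source argument tells how the busy bit and NT flag are updated:
   JMP and IRET clear the busy bit of the outgoing TSS, JMP and CALL set
   it on the new one, and CALL additionally stores a back link to the old
   TSS and sets NT. */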
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
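
/* SET_ESP only updates the bits of ESP/RSP selected by sp_mask, so pushes
   through a 16-bit or 32-bit stack segment leave the upper bits of the
   stack pointer unchanged. */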
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
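
/* The PUSHW/PUSHL/POPW/POPL helpers above take an explicit (ssp, sp, sp_mask)
   triple, so a new stack frame can be built on a local copy of the stack
   pointer and only committed at the end with SET_ESP. */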
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
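
/* 64-bit IDT entries are 16 bytes wide; bits 0-2 of the gate's second word
   select an IST stack, otherwise the RSP for the target privilege level is
   fetched from the TSS via get_rsp_from_tss(). */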
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif
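
/* SYSCALL.  In long mode the new CS/SS selectors come from MSR_STAR[47:32],
   the return RIP is saved in RCX and the return RFLAGS in R11, the flags in
   MSR_FMASK are cleared and RIP is loaded from LSTAR (64-bit caller) or
   CSTAR (compatibility mode).  For user-only emulation the instruction is
   simply reported as EXCP_SYSCALL to the calling loop. */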
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
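
/* SYSRET.  The user-mode CS/SS selectors are derived from MSR_STAR[63:48];
   in long mode RIP is restored from RCX and RFLAGS from R11.  SCE clear
   raises #UD, and executing outside CPL 0 (or with PE clear) raises #GP. */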
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
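
/* Real-mode delivery reads a 4-byte vector from the IVT and pushes 16-bit
   FLAGS, CS and IP, like a far call. */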
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
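
/* CPUClass::do_interrupt hook: deliver the exception or interrupt currently
   recorded in the CPUState by the execution loop. */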
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
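
/* CPUClass::cpu_exec_interrupt hook: examine the pending interrupt_request
   bits, deliver the highest-priority event (SIPI, SMI, NMI, MCE, external or
   virtual IRQ) and return true if one was handled. */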
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = ldl_phys(cs->as, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
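
/* LLDT: load the LDT register from a GDT descriptor; in long mode the
   descriptor is 16 bytes and supplies the upper 32 bits of the base. */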
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
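
/* LTR: load the task register from a GDT descriptor and mark the selected
   TSS descriptor busy. */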
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
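
/* Far JMP in protected mode.  The destination may be a code segment or a
   gate: a task gate or TSS descriptor triggers a task switch via
   switch_tss(), while a call gate redirects to the code segment stored in
   the gate. */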
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
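
/* Far CALL in protected mode.  A call gate leading to a more privileged
   code segment switches to the stack given by the TSS for the target DPL
   and copies param_count parameters from the old stack to the new one. */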
1756 /* protected mode call */
1757 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1758 int shift, int next_eip_addend)
1760 int new_stack, i;
1761 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1762 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1763 uint32_t val, limit, old_sp_mask;
1764 target_ulong ssp, old_ssp, next_eip;
1766 next_eip = env->eip + next_eip_addend;
1767 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1768 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1769 if ((new_cs & 0xfffc) == 0) {
1770 raise_exception_err(env, EXCP0D_GPF, 0);
1772 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1773 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1775 cpl = env->hflags & HF_CPL_MASK;
1776 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1777 if (e2 & DESC_S_MASK) {
1778 if (!(e2 & DESC_CS_MASK)) {
1779 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1781 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1782 if (e2 & DESC_C_MASK) {
1783 /* conforming code segment */
1784 if (dpl > cpl) {
1785 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1787 } else {
1788 /* non conforming code segment */
1789 rpl = new_cs & 3;
1790 if (rpl > cpl) {
1791 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1793 if (dpl != cpl) {
1794 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1797 if (!(e2 & DESC_P_MASK)) {
1798 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1801 #ifdef TARGET_X86_64
1802 /* XXX: check 16/32 bit cases in long mode */
1803 if (shift == 2) {
1804 target_ulong rsp;
1806 /* 64 bit case */
1807 rsp = env->regs[R_ESP];
1808 PUSHQ(rsp, env->segs[R_CS].selector);
1809 PUSHQ(rsp, next_eip);
1810 /* from this point, not restartable */
1811 env->regs[R_ESP] = rsp;
1812 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1813 get_seg_base(e1, e2),
1814 get_seg_limit(e1, e2), e2);
1815 env->eip = new_eip;
1816 } else
1817 #endif
1819 sp = env->regs[R_ESP];
1820 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1821 ssp = env->segs[R_SS].base;
1822 if (shift) {
1823 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1824 PUSHL(ssp, sp, sp_mask, next_eip);
1825 } else {
1826 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1827 PUSHW(ssp, sp, sp_mask, next_eip);
1830 limit = get_seg_limit(e1, e2);
1831 if (new_eip > limit) {
1832 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1834 /* from this point, not restartable */
1835 SET_ESP(sp, sp_mask);
1836 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1837 get_seg_base(e1, e2), limit, e2);
1838 env->eip = new_eip;
1840 } else {
1841 /* check gate type */
1842 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1843 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1844 rpl = new_cs & 3;
1845 switch (type) {
1846 case 1: /* available 286 TSS */
1847 case 9: /* available 386 TSS */
1848 case 5: /* task gate */
1849 if (dpl < cpl || dpl < rpl) {
1850 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1852 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1853 return;
1854 case 4: /* 286 call gate */
1855 case 12: /* 386 call gate */
1856 break;
1857 default:
1858 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1859 break;
1861 shift = type >> 3;
1863 if (dpl < cpl || dpl < rpl) {
1864 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1866 /* check valid bit */
1867 if (!(e2 & DESC_P_MASK)) {
1868 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1870 selector = e1 >> 16;
1871 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1872 param_count = e2 & 0x1f;
1873 if ((selector & 0xfffc) == 0) {
1874 raise_exception_err(env, EXCP0D_GPF, 0);
1877 if (load_segment(env, &e1, &e2, selector) != 0) {
1878 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1880 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1881 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1883 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1884 if (dpl > cpl) {
1885 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1887 if (!(e2 & DESC_P_MASK)) {
1888 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1891 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1892 /* to inner privilege */
1893 get_ss_esp_from_tss(env, &ss, &sp, dpl);
1894 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1895 TARGET_FMT_lx "\n", ss, sp, param_count,
1896 env->regs[R_ESP]);
1897 if ((ss & 0xfffc) == 0) {
1898 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1900 if ((ss & 3) != dpl) {
1901 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1903 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
1904 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1906 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1907 if (ss_dpl != dpl) {
1908 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1910 if (!(ss_e2 & DESC_S_MASK) ||
1911 (ss_e2 & DESC_CS_MASK) ||
1912 !(ss_e2 & DESC_W_MASK)) {
1913 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1915 if (!(ss_e2 & DESC_P_MASK)) {
1916 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1919 /* push_size = ((param_count * 2) + 8) << shift; */
1921 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1922 old_ssp = env->segs[R_SS].base;
1924 sp_mask = get_sp_mask(ss_e2);
1925 ssp = get_seg_base(ss_e1, ss_e2);
1926 if (shift) {
1927 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1928 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
1929 for (i = param_count - 1; i >= 0; i--) {
1930 val = cpu_ldl_kernel(env, old_ssp +
1931 ((env->regs[R_ESP] + i * 4) &
1932 old_sp_mask));
1933 PUSHL(ssp, sp, sp_mask, val);
1935 } else {
1936 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1937 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
1938 for (i = param_count - 1; i >= 0; i--) {
1939 val = cpu_lduw_kernel(env, old_ssp +
1940 ((env->regs[R_ESP] + i * 2) &
1941 old_sp_mask));
1942 PUSHW(ssp, sp, sp_mask, val);
1945 new_stack = 1;
1946 } else {
1947 /* to same privilege */
1948 sp = env->regs[R_ESP];
1949 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1950 ssp = env->segs[R_SS].base;
1951 /* push_size = (4 << shift); */
1952 new_stack = 0;
1955 if (shift) {
1956 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1957 PUSHL(ssp, sp, sp_mask, next_eip);
1958 } else {
1959 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1960 PUSHW(ssp, sp, sp_mask, next_eip);
1963 /* from this point, not restartable */
1965 if (new_stack) {
1966 ss = (ss & ~3) | dpl;
1967 cpu_x86_load_seg_cache(env, R_SS, ss,
1968 ssp,
1969 get_seg_limit(ss_e1, ss_e2),
1970 ss_e2);
1973 selector = (selector & ~3) | dpl;
1974 cpu_x86_load_seg_cache(env, R_CS, selector,
1975 get_seg_base(e1, e2),
1976 get_seg_limit(e1, e2),
1977 e2);
1978 SET_ESP(sp, sp_mask);
1979 env->eip = offset;
1980 }
1981 }
1983 /* real and vm86 mode iret */
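/* Pops IP, CS and FLAGS with no privilege checks; the new CS base is
   simply selector << 4. In vm86 mode IOPL is not writable, so it is
   left out of the eflags update mask. */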
1984 void helper_iret_real(CPUX86State *env, int shift)
1985 {
1986 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1987 target_ulong ssp;
1988 int eflags_mask;
1990 sp_mask = 0xffff; /* XXXX: use SS segment size? */
1991 sp = env->regs[R_ESP];
1992 ssp = env->segs[R_SS].base;
1993 if (shift == 1) {
1994 /* 32 bits */
1995 POPL(ssp, sp, sp_mask, new_eip);
1996 POPL(ssp, sp, sp_mask, new_cs);
1997 new_cs &= 0xffff;
1998 POPL(ssp, sp, sp_mask, new_eflags);
1999 } else {
2000 /* 16 bits */
2001 POPW(ssp, sp, sp_mask, new_eip);
2002 POPW(ssp, sp, sp_mask, new_cs);
2003 POPW(ssp, sp, sp_mask, new_eflags);
2004 }
2005 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
2006 env->segs[R_CS].selector = new_cs;
2007 env->segs[R_CS].base = (new_cs << 4);
2008 env->eip = new_eip;
2009 if (env->eflags & VM_MASK) {
2010 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2011 NT_MASK;
2012 } else {
2013 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2014 RF_MASK | NT_MASK;
2015 }
2016 if (shift == 0) {
2017 eflags_mask &= 0xffff;
2018 }
2019 cpu_load_eflags(env, new_eflags, eflags_mask);
2020 env->hflags2 &= ~HF2_NMI_MASK;
2021 }
2023 static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
2024 {
2025 int dpl;
2026 uint32_t e2;
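/* Called after a privilege-lowering return: data and non-conforming
   code segment registers whose DPL is below the new CPL are cleared to
   a NULL descriptor. */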
2028 /* XXX: on x86_64, we do not want to nullify FS and GS because
2029 they may still contain a valid base. I would be interested to
2030 know how a real x86_64 CPU behaves */
2031 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2032 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2033 return;
2034 }
2036 e2 = env->segs[seg_reg].flags;
2037 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2038 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2039 /* data or non conforming code segment */
2040 if (dpl < cpl) {
2041 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2042 }
2043 }
2044 }
2046 /* protected mode iret */
2047 static inline void helper_ret_protected(CPUX86State *env, int shift,
2048 int is_iret, int addend)
2049 {
2050 uint32_t new_cs, new_eflags, new_ss;
2051 uint32_t new_es, new_ds, new_fs, new_gs;
2052 uint32_t e1, e2, ss_e1, ss_e2;
2053 int cpl, dpl, rpl, eflags_mask, iopl;
2054 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
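/* shift selects the operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit),
   is_iret distinguishes IRET from far RET, and addend is the extra byte
   count popped for "ret far imm16". */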
2056 #ifdef TARGET_X86_64
2057 if (shift == 2) {
2058 sp_mask = -1;
2059 } else
2060 #endif
2061 {
2062 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2063 }
2064 sp = env->regs[R_ESP];
2065 ssp = env->segs[R_SS].base;
2066 new_eflags = 0; /* avoid warning */
2067 #ifdef TARGET_X86_64
2068 if (shift == 2) {
2069 POPQ(sp, new_eip);
2070 POPQ(sp, new_cs);
2071 new_cs &= 0xffff;
2072 if (is_iret) {
2073 POPQ(sp, new_eflags);
2074 }
2075 } else
2076 #endif
2077 {
2078 if (shift == 1) {
2079 /* 32 bits */
2080 POPL(ssp, sp, sp_mask, new_eip);
2081 POPL(ssp, sp, sp_mask, new_cs);
2082 new_cs &= 0xffff;
2083 if (is_iret) {
2084 POPL(ssp, sp, sp_mask, new_eflags);
2085 if (new_eflags & VM_MASK) {
2086 goto return_to_vm86;
2087 }
2088 }
2089 } else {
2090 /* 16 bits */
2091 POPW(ssp, sp, sp_mask, new_eip);
2092 POPW(ssp, sp, sp_mask, new_cs);
2093 if (is_iret) {
2094 POPW(ssp, sp, sp_mask, new_eflags);
2095 }
2096 }
2097 }
2098 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2099 new_cs, new_eip, shift, addend);
2100 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2101 if ((new_cs & 0xfffc) == 0) {
2102 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2103 }
2104 if (load_segment(env, &e1, &e2, new_cs) != 0) {
2105 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2106 }
2107 if (!(e2 & DESC_S_MASK) ||
2108 !(e2 & DESC_CS_MASK)) {
2109 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2110 }
2111 cpl = env->hflags & HF_CPL_MASK;
2112 rpl = new_cs & 3;
2113 if (rpl < cpl) {
2114 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2115 }
2116 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2117 if (e2 & DESC_C_MASK) {
2118 if (dpl > rpl) {
2119 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2120 }
2121 } else {
2122 if (dpl != rpl) {
2123 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2124 }
2125 }
2126 if (!(e2 & DESC_P_MASK)) {
2127 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2128 }
2130 sp += addend;
2131 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2132 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2133 /* return to same privilege level */
2134 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2135 get_seg_base(e1, e2),
2136 get_seg_limit(e1, e2),
2137 e2);
2138 } else {
2139 /* return to different privilege level */
2140 #ifdef TARGET_X86_64
2141 if (shift == 2) {
2142 POPQ(sp, new_esp);
2143 POPQ(sp, new_ss);
2144 new_ss &= 0xffff;
2145 } else
2146 #endif
2147 {
2148 if (shift == 1) {
2149 /* 32 bits */
2150 POPL(ssp, sp, sp_mask, new_esp);
2151 POPL(ssp, sp, sp_mask, new_ss);
2152 new_ss &= 0xffff;
2153 } else {
2154 /* 16 bits */
2155 POPW(ssp, sp, sp_mask, new_esp);
2156 POPW(ssp, sp, sp_mask, new_ss);
2157 }
2158 }
2159 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2160 new_ss, new_esp);
2161 if ((new_ss & 0xfffc) == 0) {
2162 #ifdef TARGET_X86_64
2163 /* NULL ss is allowed in long mode if cpl != 3 */
2164 /* XXX: test CS64? */
2165 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2166 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2167 0, 0xffffffff,
2168 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2169 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2170 DESC_W_MASK | DESC_A_MASK);
2171 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2172 } else
2173 #endif
2174 {
2175 raise_exception_err(env, EXCP0D_GPF, 0);
2176 }
2177 } else {
2178 if ((new_ss & 3) != rpl) {
2179 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2180 }
2181 if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
2182 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2183 }
2184 if (!(ss_e2 & DESC_S_MASK) ||
2185 (ss_e2 & DESC_CS_MASK) ||
2186 !(ss_e2 & DESC_W_MASK)) {
2187 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2188 }
2189 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2190 if (dpl != rpl) {
2191 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2192 }
2193 if (!(ss_e2 & DESC_P_MASK)) {
2194 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2195 }
2196 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2197 get_seg_base(ss_e1, ss_e2),
2198 get_seg_limit(ss_e1, ss_e2),
2199 ss_e2);
2200 }
2202 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2203 get_seg_base(e1, e2),
2204 get_seg_limit(e1, e2),
2205 e2);
2206 sp = new_esp;
2207 #ifdef TARGET_X86_64
2208 if (env->hflags & HF_CS64_MASK) {
2209 sp_mask = -1;
2210 } else
2211 #endif
2212 {
2213 sp_mask = get_sp_mask(ss_e2);
2214 }
2216 /* validate data segments */
2217 validate_seg(env, R_ES, rpl);
2218 validate_seg(env, R_DS, rpl);
2219 validate_seg(env, R_FS, rpl);
2220 validate_seg(env, R_GS, rpl);
2222 sp += addend;
2223 }
2224 SET_ESP(sp, sp_mask);
2225 env->eip = new_eip;
2226 if (is_iret) {
2227 /* NOTE: 'cpl' is the _old_ CPL */
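/* IOPL may only change when returning from CPL 0, and IF only when the
   old CPL is not above IOPL. */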
2228 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2229 if (cpl == 0) {
2230 eflags_mask |= IOPL_MASK;
2231 }
2232 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2233 if (cpl <= iopl) {
2234 eflags_mask |= IF_MASK;
2235 }
2236 if (shift == 0) {
2237 eflags_mask &= 0xffff;
2238 }
2239 cpu_load_eflags(env, new_eflags, eflags_mask);
2240 }
2241 return;
2243 return_to_vm86:
2244 POPL(ssp, sp, sp_mask, new_esp);
2245 POPL(ssp, sp, sp_mask, new_ss);
2246 POPL(ssp, sp, sp_mask, new_es);
2247 POPL(ssp, sp, sp_mask, new_ds);
2248 POPL(ssp, sp, sp_mask, new_fs);
2249 POPL(ssp, sp, sp_mask, new_gs);
2251 /* modify processor state */
2252 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2253 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2254 VIP_MASK);
2255 load_seg_vm(env, R_CS, new_cs & 0xffff);
2256 load_seg_vm(env, R_SS, new_ss & 0xffff);
2257 load_seg_vm(env, R_ES, new_es & 0xffff);
2258 load_seg_vm(env, R_DS, new_ds & 0xffff);
2259 load_seg_vm(env, R_FS, new_fs & 0xffff);
2260 load_seg_vm(env, R_GS, new_gs & 0xffff);
2262 env->eip = new_eip & 0xffff;
2263 env->regs[R_ESP] = new_esp;
2264 }
2266 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2267 {
2268 int tss_selector, type;
2269 uint32_t e1, e2;
2271 /* specific case for TSS */
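/* If NT is set, IRET returns to the previous task through the back link
   stored at offset 0 of the current TSS; nested task returns do not
   exist in long mode, so that case is a #GP. */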
2272 if (env->eflags & NT_MASK) {
2273 #ifdef TARGET_X86_64
2274 if (env->hflags & HF_LMA_MASK) {
2275 raise_exception_err(env, EXCP0D_GPF, 0);
2276 }
2277 #endif
2278 tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
2279 if (tss_selector & 4) {
2280 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2281 }
2282 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
2283 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2284 }
2285 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2286 /* NOTE: we check both segment and busy TSS */
2287 if (type != 3) {
2288 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2289 }
2290 switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2291 } else {
2292 helper_ret_protected(env, shift, 1, 0);
2293 }
2294 env->hflags2 &= ~HF2_NMI_MASK;
2295 }
2297 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2298 {
2299 helper_ret_protected(env, shift, 0, addend);
2300 }
2302 void helper_sysenter(CPUX86State *env)
2303 {
2304 if (env->sysenter_cs == 0) {
2305 raise_exception_err(env, EXCP0D_GPF, 0);
2306 }
2307 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
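/* SYSENTER enters CPL 0 with flat segments derived from SYSENTER_CS:
   CS = sysenter_cs (a 64-bit code segment in long mode) and
   SS = sysenter_cs + 8; ESP/EIP come from the SYSENTER MSR values. */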
2309 #ifdef TARGET_X86_64
2310 if (env->hflags & HF_LMA_MASK) {
2311 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2312 0, 0xffffffff,
2313 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2314 DESC_S_MASK |
2315 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2316 DESC_L_MASK);
2317 } else
2318 #endif
2319 {
2320 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2321 0, 0xffffffff,
2322 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2323 DESC_S_MASK |
2324 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2325 }
2326 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2327 0, 0xffffffff,
2328 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2329 DESC_S_MASK |
2330 DESC_W_MASK | DESC_A_MASK);
2331 env->regs[R_ESP] = env->sysenter_esp;
2332 env->eip = env->sysenter_eip;
2333 }
2335 void helper_sysexit(CPUX86State *env, int dflag)
2336 {
2337 int cpl;
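/* SYSEXIT returns to CPL 3 with CS = sysenter_cs + 16 and SS = + 24
   (or + 32 / + 40 for a 64-bit return); ESP and EIP are taken from
   ECX and EDX. */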
2339 cpl = env->hflags & HF_CPL_MASK;
2340 if (env->sysenter_cs == 0 || cpl != 0) {
2341 raise_exception_err(env, EXCP0D_GPF, 0);
2342 }
2343 #ifdef TARGET_X86_64
2344 if (dflag == 2) {
2345 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2346 3, 0, 0xffffffff,
2347 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2348 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2350 DESC_L_MASK);
2351 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2352 3, 0, 0xffffffff,
2353 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2354 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2355 DESC_W_MASK | DESC_A_MASK);
2356 } else
2357 #endif
2358 {
2359 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2360 3, 0, 0xffffffff,
2361 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2362 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2363 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2364 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2365 3, 0, 0xffffffff,
2366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2367 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2368 DESC_W_MASK | DESC_A_MASK);
2369 }
2370 env->regs[R_ESP] = env->regs[R_ECX];
2371 env->eip = env->regs[R_EDX];
2372 }
2374 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2375 {
2376 unsigned int limit;
2377 uint32_t e1, e2, eflags, selector;
2378 int rpl, dpl, cpl, type;
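/* LSL: if the selector is valid and its descriptor is visible at the
   current CPL/RPL, return the expanded limit and set ZF; otherwise
   clear ZF and return 0. */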
2380 selector = selector1 & 0xffff;
2381 eflags = cpu_cc_compute_all(env, CC_OP);
2382 if ((selector & 0xfffc) == 0) {
2383 goto fail;
2384 }
2385 if (load_segment(env, &e1, &e2, selector) != 0) {
2386 goto fail;
2387 }
2388 rpl = selector & 3;
2389 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390 cpl = env->hflags & HF_CPL_MASK;
2391 if (e2 & DESC_S_MASK) {
2392 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2393 /* conforming */
2394 } else {
2395 if (dpl < cpl || dpl < rpl) {
2396 goto fail;
2397 }
2398 }
2399 } else {
2400 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2401 switch (type) {
2402 case 1:
2403 case 2:
2404 case 3:
2405 case 9:
2406 case 11:
2407 break;
2408 default:
2409 goto fail;
2410 }
2411 if (dpl < cpl || dpl < rpl) {
2412 fail:
2413 CC_SRC = eflags & ~CC_Z;
2414 return 0;
2415 }
2416 }
2417 limit = get_seg_limit(e1, e2);
2418 CC_SRC = eflags | CC_Z;
2419 return limit;
2420 }
2422 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2423 {
2424 uint32_t e1, e2, eflags, selector;
2425 int rpl, dpl, cpl, type;
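/* LAR performs the same visibility checks as LSL (allowing a few more
   gate types) and returns the access-rights bytes of the descriptor
   (e2 & 0x00f0ff00), setting ZF on success. */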
2427 selector = selector1 & 0xffff;
2428 eflags = cpu_cc_compute_all(env, CC_OP);
2429 if ((selector & 0xfffc) == 0) {
2430 goto fail;
2431 }
2432 if (load_segment(env, &e1, &e2, selector) != 0) {
2433 goto fail;
2434 }
2435 rpl = selector & 3;
2436 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2437 cpl = env->hflags & HF_CPL_MASK;
2438 if (e2 & DESC_S_MASK) {
2439 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2440 /* conforming */
2441 } else {
2442 if (dpl < cpl || dpl < rpl) {
2443 goto fail;
2444 }
2445 }
2446 } else {
2447 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2448 switch (type) {
2449 case 1:
2450 case 2:
2451 case 3:
2452 case 4:
2453 case 5:
2454 case 9:
2455 case 11:
2456 case 12:
2457 break;
2458 default:
2459 goto fail;
2460 }
2461 if (dpl < cpl || dpl < rpl) {
2462 fail:
2463 CC_SRC = eflags & ~CC_Z;
2464 return 0;
2465 }
2466 }
2467 CC_SRC = eflags | CC_Z;
2468 return e2 & 0x00f0ff00;
2469 }
2471 void helper_verr(CPUX86State *env, target_ulong selector1)
2472 {
2473 uint32_t e1, e2, eflags, selector;
2474 int rpl, dpl, cpl;
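/* VERR: set ZF if the segment is readable at the current CPL/RPL,
   without faulting. */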
2476 selector = selector1 & 0xffff;
2477 eflags = cpu_cc_compute_all(env, CC_OP);
2478 if ((selector & 0xfffc) == 0) {
2479 goto fail;
2480 }
2481 if (load_segment(env, &e1, &e2, selector) != 0) {
2482 goto fail;
2483 }
2484 if (!(e2 & DESC_S_MASK)) {
2485 goto fail;
2486 }
2487 rpl = selector & 3;
2488 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 cpl = env->hflags & HF_CPL_MASK;
2490 if (e2 & DESC_CS_MASK) {
2491 if (!(e2 & DESC_R_MASK)) {
2492 goto fail;
2493 }
2494 if (!(e2 & DESC_C_MASK)) {
2495 if (dpl < cpl || dpl < rpl) {
2496 goto fail;
2497 }
2498 }
2499 } else {
2500 if (dpl < cpl || dpl < rpl) {
2501 fail:
2502 CC_SRC = eflags & ~CC_Z;
2503 return;
2504 }
2505 }
2506 CC_SRC = eflags | CC_Z;
2507 }
2509 void helper_verw(CPUX86State *env, target_ulong selector1)
2510 {
2511 uint32_t e1, e2, eflags, selector;
2512 int rpl, dpl, cpl;
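/* VERW: set ZF if the segment is a writable data segment visible at the
   current CPL/RPL, without faulting. */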
2514 selector = selector1 & 0xffff;
2515 eflags = cpu_cc_compute_all(env, CC_OP);
2516 if ((selector & 0xfffc) == 0) {
2517 goto fail;
2518 }
2519 if (load_segment(env, &e1, &e2, selector) != 0) {
2520 goto fail;
2521 }
2522 if (!(e2 & DESC_S_MASK)) {
2523 goto fail;
2524 }
2525 rpl = selector & 3;
2526 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2527 cpl = env->hflags & HF_CPL_MASK;
2528 if (e2 & DESC_CS_MASK) {
2529 goto fail;
2530 } else {
2531 if (dpl < cpl || dpl < rpl) {
2532 goto fail;
2533 }
2534 if (!(e2 & DESC_W_MASK)) {
2535 fail:
2536 CC_SRC = eflags & ~CC_Z;
2537 return;
2538 }
2539 }
2540 CC_SRC = eflags | CC_Z;
2541 }
2543 #if defined(CONFIG_USER_ONLY)
2544 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2545 {
2546 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
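/* Real or vm86 mode: the descriptor is implicit, base = selector << 4
   and the limit is 64 KiB. */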
2547 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
2548 selector &= 0xffff;
2549 cpu_x86_load_seg_cache(env, seg_reg, selector,
2550 (selector << 4), 0xffff,
2551 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2552 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
2553 } else {
2554 helper_load_seg(env, seg_reg, selector);
2555 }
2556 }
2557 #endif
2559 /* check if Port I/O is allowed in TSS */
2560 static inline void check_io(CPUX86State *env, int addr, int size)
2561 {
2562 int io_offset, val, mask;
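/* Consult the I/O permission bitmap of the 32-bit TSS: its offset is
   the 16-bit value at 0x66. One bit per port; every bit covering
   [addr, addr + size) must be clear for the access to be allowed. */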
2564 /* TSS must be a valid 32 bit one */
2565 if (!(env->tr.flags & DESC_P_MASK) ||
2566 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2567 env->tr.limit < 103) {
2568 goto fail;
2569 }
2570 io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
2571 io_offset += (addr >> 3);
2572 /* Note: the check needs two bytes */
2573 if ((io_offset + 1) > env->tr.limit) {
2574 goto fail;
2575 }
2576 val = cpu_lduw_kernel(env, env->tr.base + io_offset);
2577 val >>= (addr & 7);
2578 mask = (1 << size) - 1;
2579 /* all bits must be zero to allow the I/O */
2580 if ((val & mask) != 0) {
2581 fail:
2582 raise_exception_err(env, EXCP0D_GPF, 0);
2583 }
2584 }
2586 void helper_check_iob(CPUX86State *env, uint32_t t0)
2587 {
2588 check_io(env, t0, 1);
2589 }
2591 void helper_check_iow(CPUX86State *env, uint32_t t0)
2592 {
2593 check_io(env, t0, 2);
2594 }
2596 void helper_check_iol(CPUX86State *env, uint32_t t0)
2597 {
2598 check_io(env, t0, 4);
2599 }