target-i386: Fix vm86 mode regression introduced in fd460606fd6f.
[qemu/ar7.git] / target-i386 / seg_helper.c
blobcc7eadf9e2e1c473a590f7a02d2a4f288c722ca9
1 /*
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
5 * Copyright (c) 2003 Fabrice Bellard
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 #include "cpu.h"
22 #include "qemu/log.h"
23 #include "helper.h"
25 //#define DEBUG_PCALL
27 #if !defined(CONFIG_USER_ONLY)
28 #include "exec/softmmu_exec.h"
29 #endif /* !defined(CONFIG_USER_ONLY) */
31 #ifdef DEBUG_PCALL
32 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
33 # define LOG_PCALL_STATE(cpu) \
34 log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
35 #else
36 # define LOG_PCALL(...) do { } while (0)
37 # define LOG_PCALL_STATE(cpu) do { } while (0)
38 #endif
40 /* return non zero if error */
41 static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
42 uint32_t *e2_ptr, int selector)
44 SegmentCache *dt;
45 int index;
46 target_ulong ptr;
48 if (selector & 0x4) {
49 dt = &env->ldt;
50 } else {
51 dt = &env->gdt;
53 index = selector & ~7;
54 if ((index + 7) > dt->limit) {
55 return -1;
57 ptr = dt->base + index;
58 *e1_ptr = cpu_ldl_kernel(env, ptr);
59 *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
60 return 0;
63 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
65 unsigned int limit;
67 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
68 if (e2 & DESC_G_MASK) {
69 limit = (limit << 12) | 0xfff;
71 return limit;
/* Assemble the 32-bit segment base scattered across a descriptor:
   base[15:0] lives in e1[31:16], base[23:16] in e2[7:0] and
   base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
79 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
80 uint32_t e2)
82 sc->base = get_seg_base(e1, e2);
83 sc->limit = get_seg_limit(e1, e2);
84 sc->flags = e2;
87 /* init the segment cache in vm86 mode. */
88 static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
90 selector &= 0xffff;
91 cpu_x86_load_seg_cache(env, seg, selector,
92 (selector << 4), 0xffff, 0);
/* Fetch the inner-level stack pointer (SS:ESP) for privilege level DPL
   from the current task state segment.  A 16-bit TSS stores SP/SS as
   16-bit fields at 4-byte stride; a 32-bit TSS stores ESP/SS as
   32-bit/16-bit fields at 8-byte stride.  Aborts on a malformed TR,
   raises #TS if the requested slot is outside the TSS limit. */
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    /* TR must cache an (available or busy) TSS descriptor: type x001/x011 */
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    /* bit 3 of the type distinguishes 16-bit (0) from 32-bit (1) TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
/* Load a segment register as part of a task switch.  Unlike load_seg(),
   every validation failure raises #TS (EXCP0A_TSS) so the task switch
   can be reported against the incoming TSS; a not-present segment
   raises #NP.  A null selector is allowed for data segments but not
   for CS or SS. */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        /* must be a code/data (S=1) descriptor, not a system one */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* conforming code may not be more privileged than RPL */
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* null selector: forbidden for CS and SS, fine elsewhere */
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
197 #define SWITCH_TSS_JMP 0
198 #define SWITCH_TSS_IRET 1
199 #define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a full hardware task switch to TSS_SELECTOR (whose descriptor
   words are E1/E2).  SOURCE is one of SWITCH_TSS_JMP/IRET/CALL and
   controls the busy-bit and NT-flag bookkeeping; NEXT_EIP is the EIP
   saved into the outgoing TSS.  The sequence follows the architectural
   order: validate the new TSS, read its whole register image, save the
   current state into the old TSS, then install the new state. */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        /* the gate's target selector must reference the GDT */
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    /* 32-bit TSS needs at least 104 bytes, 16-bit TSS 44 bytes */
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        /* IRET out of a nested task clears NT in the saved image */
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* back-link to the old task and set NT for the nested return */
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        /* must be a system LDT descriptor (type 2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
477 static inline unsigned int get_sp_mask(unsigned int e2)
479 if (e2 & DESC_B_MASK) {
480 return 0xffffffff;
481 } else {
482 return 0xffff;
/* Return 1 if the CPU pushes an error code for exception vector INTNO:
   #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17). */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    default:
        return 0;
    }
}
501 #ifdef TARGET_X86_64
502 #define SET_ESP(val, sp_mask) \
503 do { \
504 if ((sp_mask) == 0xffff) { \
505 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
506 ((val) & 0xffff); \
507 } else if ((sp_mask) == 0xffffffffLL) { \
508 env->regs[R_ESP] = (uint32_t)(val); \
509 } else { \
510 env->regs[R_ESP] = (val); \
512 } while (0)
513 #else
514 #define SET_ESP(val, sp_mask) \
515 do { \
516 env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
517 ((val) & (sp_mask)); \
518 } while (0)
519 #endif
521 /* in 64-bit machines, this can overflow. So this segment addition macro
522 * can be used to trim the value to 32-bit whenever needed */
523 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
525 /* XXX: add a is_user flag to have proper security support */
526 #define PUSHW(ssp, sp, sp_mask, val) \
528 sp -= 2; \
529 cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
532 #define PUSHL(ssp, sp, sp_mask, val) \
534 sp -= 4; \
535 cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
538 #define POPW(ssp, sp, sp_mask, val) \
540 val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask))); \
541 sp += 2; \
544 #define POPL(ssp, sp, sp_mask, val) \
546 val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
547 sp += 4; \
/* protected mode interrupt */
/* Deliver interrupt/exception INTNO through the protected-mode IDT.
   IS_INT is true for a software INT (privilege-checked against the gate
   DPL); IS_HW suppresses error-code pushing for hardware interrupts.
   Handles task gates (via switch_tss), 286/386 interrupt and trap
   gates, inner-privilege stack switches via the TSS, and the extended
   vm86 frame (GS/FS/DS/ES pushed, then zeroed). */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* gate type bit 3 selects 32-bit (shift=1) vs 16-bit (shift=0) pushes */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
766 #ifdef TARGET_X86_64
768 #define PUSHQ(sp, val) \
770 sp -= 8; \
771 cpu_stq_kernel(env, sp, (val)); \
774 #define POPQ(sp, val) \
776 val = cpu_ldq_kernel(env, sp); \
777 sp += 8; \
/* Read an RSP value from the 64-bit TSS.  LEVEL 0-2 selects RSP0-RSP2
   (privilege stacks); levels 4-10 reach IST1-IST7, which is why callers
   pass ist + 3 for IST entries (slots start at offset 4, stride 8). */
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
/* 64 bit interrupt */
/* Deliver interrupt/exception INTNO through the 64-bit IDT (16-byte
   gates).  Only 386-style interrupt/trap gates are legal in long mode.
   The stack is switched via RSPn or an IST slot from the 64-bit TSS,
   aligned to 16 bytes, and SS:RSP, RFLAGS, CS:RIP (and optionally the
   error code) are always pushed as 8-byte values. */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    /* target must be a 64-bit code segment (L=1, D=0) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        /* a null SS (with the handler's RPL) is loaded on stack switch */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
929 #endif
931 #ifdef TARGET_X86_64
932 #if defined(CONFIG_USER_ONLY)
/* SYSCALL under user-mode emulation: no MSR/segment state exists, so
   just record the return address and exit to the outer cpu loop, which
   dispatches EXCP_SYSCALL to the host syscall translator. */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
941 #else
/* Full SYSCALL emulation: #UD unless EFER.SCE is set.  CS/SS come from
   STAR[47:32]; in long mode RCX gets the return RIP, R11 the saved
   RFLAGS, RFLAGS is masked by FMASK and RIP comes from LSTAR (64-bit
   caller) or CSTAR (compatibility mode).  In legacy mode ECX gets the
   return EIP and EIP comes from STAR[31:0]. */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
993 #endif
994 #endif
996 #ifdef TARGET_X86_64
/* SYSRET: return to user mode from a SYSCALL.  #UD unless EFER.SCE;
   #GP if not in protected mode or CPL != 0.  CS/SS selectors come from
   STAR[63:48]; DFLAG == 2 means a 64-bit return (CS at +16, RIP from
   RCX, RFLAGS from R11), otherwise a 32-bit return.  All loaded
   descriptors carry DPL 3, which also sets the new CPL. */
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        /* legacy-mode SYSRET always re-enables interrupts */
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
1049 #endif
/* real mode interrupt */
/* Deliver INTNO via the real-mode IVT (4-byte entries: offset, then
   CS).  Pushes FLAGS, CS and IP as 16-bit values on the current stack
   and clears IF/TF/AC/RF.
   NOTE(review): the limit-check error code uses intno * 8 + 2 (the
   protected-mode convention) even though IVT entries are 4 bytes —
   matches upstream, but looks inherited rather than deliberate. */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1090 #if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
/* Minimal interrupt handling for user-mode emulation: only the gate
   DPL-vs-CPL check for software INTs is performed (so #GP can be
   delivered to the guest), then the exception is left for the outer
   emulation loop to handle. */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* long mode uses 16-byte IDT entries, legacy mode 8-byte ones */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}
1124 #else
/*
 * SVM event injection bookkeeping: if the guest's VMCB does not already
 * carry a valid pending event, record this interrupt/exception in the
 * VMCB event_inj field (and event_inj_err when an error code applies)
 * so a nested hypervisor can observe it.  'rm' is non-zero when the
 * event is delivered in real mode, where no error code is ever pushed.
 */
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
1153 #endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 *
 * Dispatches to the protected-mode, long-mode or real-mode delivery
 * routine depending on CR0.PE and EFER.LMA, handling SVM event
 * injection bookkeeping before and after delivery.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            /* vector 0x0e is #PF: log CR2 (faulting address) instead of EAX */
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* event was delivered by the guest itself: clear the pending-event
       valid bit in the VMCB */
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/*
 * CPUClass::do_interrupt hook for x86: deliver the exception/interrupt
 * recorded in cs->exception_index using the state captured in env
 * (exception_is_int, error_code, exception_next_eip).
 */
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
/* Deliver an external hardware interrupt (is_int=0, no error code,
   next_eip irrelevant); is_hw distinguishes real hardware IRQs for SVM. */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
1270 void helper_enter_level(CPUX86State *env, int level, int data32,
1271 target_ulong t1)
1273 target_ulong ssp;
1274 uint32_t esp_mask, esp, ebp;
1276 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1277 ssp = env->segs[R_SS].base;
1278 ebp = env->regs[R_EBP];
1279 esp = env->regs[R_ESP];
1280 if (data32) {
1281 /* 32 bit */
1282 esp -= 4;
1283 while (--level) {
1284 esp -= 4;
1285 ebp -= 4;
1286 cpu_stl_data(env, ssp + (esp & esp_mask),
1287 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
1289 esp -= 4;
1290 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
1291 } else {
1292 /* 16 bit */
1293 esp -= 2;
1294 while (--level) {
1295 esp -= 2;
1296 ebp -= 2;
1297 cpu_stw_data(env, ssp + (esp & esp_mask),
1298 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
1300 esp -= 2;
1301 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
#ifdef TARGET_X86_64
/*
 * ENTER with a non-zero nesting level in 64-bit mode: like
 * helper_enter_level but with a flat 64-bit stack (no SS base, no
 * mask).  'data64' selects 64-bit vs 16-bit operand size.
 */
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong frame_ptr = env->regs[R_EBP];
    target_ulong new_sp = env->regs[R_ESP];
    int n;

    if (data64) {
        /* 64-bit operand size: copy quadwords */
        new_sp -= 8;
        for (n = level - 1; n > 0; n--) {
            new_sp -= 8;
            frame_ptr -= 8;
            cpu_stq_data(env, new_sp, cpu_ldq_data(env, frame_ptr));
        }
        new_sp -= 8;
        cpu_stq_data(env, new_sp, t1);
    } else {
        /* 16-bit operand size: copy words */
        new_sp -= 2;
        for (n = level - 1; n > 0; n--) {
            new_sp -= 2;
            frame_ptr -= 2;
            cpu_stw_data(env, new_sp, cpu_lduw_data(env, frame_ptr));
        }
        new_sp -= 2;
        cpu_stw_data(env, new_sp, t1);
    }
}
#endif
/*
 * LLDT: load the Local Descriptor Table register from a GDT selector.
 * A null selector installs an empty LDT; otherwise the referenced
 * descriptor must be a present LDT descriptor (system type 2).  In
 * long mode the descriptor is 16 bytes and carries a 64-bit base.
 * Raises #GP/#NP with the selector as error code on any check failure.
 */
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* the LDT selector must reference the GDT (TI bit clear) */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 16-byte system descriptor in long mode */
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            /* upper 32 bits of the base live in the third word */
            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
/*
 * LTR: load the Task Register from a GDT selector.  A null selector
 * clears TR; otherwise the descriptor must be an available TSS
 * (type 1 = 16-bit, type 9 = 32-bit) and present.  On success the
 * descriptor's busy bit is set back in memory, as the hardware does.
 * Raises #GP/#NP with the selector as error code on failure.
 */
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TSS descriptors must live in the GDT */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 16-byte system descriptor in long mode */
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available 286 (1) or 386 (9) TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            /* the fourth word's type field must be zero in long mode */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the descriptor table, as hardware does */
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/*
 * MOV to segment register / POP segment register in protected mode:
 * validate 'selector' against its descriptor (writability for SS,
 * readability and privilege for data segments), set the descriptor's
 * accessed bit if needed, and load the segment cache.  Raises
 * #GP/#SS/#NP with the selector as error code on failure.
 */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        /* a null SS is only legal in 64-bit mode below CPL 3 */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        /* system descriptors cannot be loaded into data segment regs */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* a not-present SS raises #SS, everything else #NP */
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
/*
 * Far JMP in protected mode.  Targets may be a code segment
 * (conforming or non-conforming), a TSS or task gate (task switch),
 * or a call gate (286/386) that redirects to another code segment.
 * All architectural privilege and limit checks are performed in
 * order, raising #GP/#NP with the offending selector on failure.
 * next_eip_addend is the size of the JMP instruction, used to compute
 * the saved return eip for task switches.
 */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        /* no limit check in long mode / 64-bit code segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* RPL of the loaded CS is forced to the current CPL */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            /* switch_tss reloads eflags, so CC state lives in eflags now */
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            /* 386 gates carry the high 16 bits of the target offset */
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
/*
 * Far CALL in real mode: push the return CS:IP on the stack (32-bit
 * values if 'shift' is set, 16-bit otherwise) and load CS:IP from the
 * operands.  The real-mode CS base is simply selector << 4.
 */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
/*
 * Far CALL in protected mode.  Handles direct calls to code segments
 * (including the 64-bit path when shift == 2), task switches through
 * TSS/task-gate targets, and call gates — including the inner-privilege
 * case where a new stack is fetched from the TSS and 'param_count'
 * parameters are copied from the old stack to the new one.  All checks
 * raise #GP/#NP/#TS with the offending selector before any state that
 * cannot be rolled back is modified ("not restartable" markers).
 */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            /* switch_tss reloads eflags; flag state now lives there */
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate type bit 3 distinguishes 16-bit (0) from 32-bit (1) */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            /* fetch the new SS:ESP for the target privilege level
               from the current TSS */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            /* push old SS:ESP, then copy param_count parameters from the
               old stack onto the new one (in original order) */
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            /* force the RPL of the new SS to the target privilege level */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
/*
 * IRET in real mode or vm86 mode: pop IP, CS and FLAGS (32-bit values
 * if shift == 1) and reload them.  In vm86 mode IOPL is privileged and
 * therefore excluded from the restored flag bits — this is the mask
 * difference below.  With a 16-bit operand only the low 16 flag bits
 * are restored.  Clears the NMI-blocking flag, as IRET does.
 */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    /* vm86 mode must not restore IOPL (privileged); real mode may */
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
/*
 * After a return to an outer privilege level, a data or non-conforming
 * code segment whose DPL is lower than the new CPL must be invalidated
 * (loaded with the null descriptor), as real hardware does on IRET/RET.
 */
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
/*
 * Common implementation of far RET and IRET in protected mode.
 * 'shift' selects the operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit);
 * 'is_iret' distinguishes IRET (also pops EFLAGS, may return to vm86);
 * 'addend' is the RET immediate, added to the stack pointer after the
 * pops.  Handles same-privilege and outer-privilege returns (the latter
 * pops the new SS:ESP and invalidates lower-privilege data segments),
 * and for IRET with VM set in the popped EFLAGS, the full return to
 * vm86 mode at the 'return_to_vm86' label.
 */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* returns may only go to the same or an outer privilege level */
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* the outer stack pointer (SS:ESP) is also on the stack */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET from the vm86 monitor: the 32-bit stack frame also holds
       the vm86 ESP, SS, ES, DS, FS and GS values */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
/*
 * IRET in protected mode.  If EFLAGS.NT is set this is a return from
 * a nested task: perform a task switch back through the back-link
 * selector stored at offset 0 of the current TSS.  Otherwise delegate
 * to the common helper_ret_protected path.  Clears NMI blocking.
 */
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* task returns are not allowed in long mode */
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
/* Far RET in protected mode: common path without EFLAGS pop;
   'addend' is the RET imm16 stack adjustment. */
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
/*
 * SYSENTER: fast transition to CPL 0.  CS/SS are loaded with flat
 * ring-0 descriptors derived from MSR_IA32_SYSENTER_CS (SS is CS+8),
 * and ESP/EIP come from the SYSENTER_ESP/EIP MSRs.  #GP(0) if the
 * SYSENTER_CS MSR is zero.  VM, IF and RF are cleared.
 */
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: the target code segment is 64-bit */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
/*
 * SYSEXIT: fast return to CPL 3.  Loads flat ring-3 CS/SS descriptors
 * at fixed offsets from MSR_IA32_SYSENTER_CS (architecturally defined:
 * +16/+24 for 32-bit, +32/+40 for a 64-bit return when dflag == 2) and
 * transfers to EDX (eip) with ESP taken from ECX.  #GP(0) if the
 * SYSENTER_CS MSR is zero or the caller is not CPL 0.
 */
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* return to 64-bit user code */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
2295 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2297 unsigned int limit;
2298 uint32_t e1, e2, eflags, selector;
2299 int rpl, dpl, cpl, type;
2301 selector = selector1 & 0xffff;
2302 eflags = cpu_cc_compute_all(env, CC_OP);
2303 if ((selector & 0xfffc) == 0) {
2304 goto fail;
2306 if (load_segment(env, &e1, &e2, selector) != 0) {
2307 goto fail;
2309 rpl = selector & 3;
2310 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2311 cpl = env->hflags & HF_CPL_MASK;
2312 if (e2 & DESC_S_MASK) {
2313 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2314 /* conforming */
2315 } else {
2316 if (dpl < cpl || dpl < rpl) {
2317 goto fail;
2320 } else {
2321 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2322 switch (type) {
2323 case 1:
2324 case 2:
2325 case 3:
2326 case 9:
2327 case 11:
2328 break;
2329 default:
2330 goto fail;
2332 if (dpl < cpl || dpl < rpl) {
2333 fail:
2334 CC_SRC = eflags & ~CC_Z;
2335 return 0;
2338 limit = get_seg_limit(e1, e2);
2339 CC_SRC = eflags | CC_Z;
2340 return limit;
2343 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2345 uint32_t e1, e2, eflags, selector;
2346 int rpl, dpl, cpl, type;
2348 selector = selector1 & 0xffff;
2349 eflags = cpu_cc_compute_all(env, CC_OP);
2350 if ((selector & 0xfffc) == 0) {
2351 goto fail;
2353 if (load_segment(env, &e1, &e2, selector) != 0) {
2354 goto fail;
2356 rpl = selector & 3;
2357 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2358 cpl = env->hflags & HF_CPL_MASK;
2359 if (e2 & DESC_S_MASK) {
2360 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2361 /* conforming */
2362 } else {
2363 if (dpl < cpl || dpl < rpl) {
2364 goto fail;
2367 } else {
2368 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2369 switch (type) {
2370 case 1:
2371 case 2:
2372 case 3:
2373 case 4:
2374 case 5:
2375 case 9:
2376 case 11:
2377 case 12:
2378 break;
2379 default:
2380 goto fail;
2382 if (dpl < cpl || dpl < rpl) {
2383 fail:
2384 CC_SRC = eflags & ~CC_Z;
2385 return 0;
2388 CC_SRC = eflags | CC_Z;
2389 return e2 & 0x00f0ff00;
2392 void helper_verr(CPUX86State *env, target_ulong selector1)
2394 uint32_t e1, e2, eflags, selector;
2395 int rpl, dpl, cpl;
2397 selector = selector1 & 0xffff;
2398 eflags = cpu_cc_compute_all(env, CC_OP);
2399 if ((selector & 0xfffc) == 0) {
2400 goto fail;
2402 if (load_segment(env, &e1, &e2, selector) != 0) {
2403 goto fail;
2405 if (!(e2 & DESC_S_MASK)) {
2406 goto fail;
2408 rpl = selector & 3;
2409 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2410 cpl = env->hflags & HF_CPL_MASK;
2411 if (e2 & DESC_CS_MASK) {
2412 if (!(e2 & DESC_R_MASK)) {
2413 goto fail;
2415 if (!(e2 & DESC_C_MASK)) {
2416 if (dpl < cpl || dpl < rpl) {
2417 goto fail;
2420 } else {
2421 if (dpl < cpl || dpl < rpl) {
2422 fail:
2423 CC_SRC = eflags & ~CC_Z;
2424 return;
2427 CC_SRC = eflags | CC_Z;
2430 void helper_verw(CPUX86State *env, target_ulong selector1)
2432 uint32_t e1, e2, eflags, selector;
2433 int rpl, dpl, cpl;
2435 selector = selector1 & 0xffff;
2436 eflags = cpu_cc_compute_all(env, CC_OP);
2437 if ((selector & 0xfffc) == 0) {
2438 goto fail;
2440 if (load_segment(env, &e1, &e2, selector) != 0) {
2441 goto fail;
2443 if (!(e2 & DESC_S_MASK)) {
2444 goto fail;
2446 rpl = selector & 3;
2447 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2448 cpl = env->hflags & HF_CPL_MASK;
2449 if (e2 & DESC_CS_MASK) {
2450 goto fail;
2451 } else {
2452 if (dpl < cpl || dpl < rpl) {
2453 goto fail;
2455 if (!(e2 & DESC_W_MASK)) {
2456 fail:
2457 CC_SRC = eflags & ~CC_Z;
2458 return;
2461 CC_SRC = eflags | CC_Z;
#if defined(CONFIG_USER_ONLY)
/* Load a segment register from outside translated code (user-only).
 *
 * In real mode and vm86 mode the descriptor cache is built by hand:
 * base = selector << 4, 64 KiB limit, and -- crucially -- present,
 * writable, accessed data-segment attributes with the correct DPL
 * (3 in vm86 mode, 0 in real mode).  Passing 0 for the flags here was
 * a regression: since commit fd460606fd6f the CPL is recomputed from
 * the cached SS descriptor's DPL, so zero attributes corrupted the
 * privilege level when entering vm86 mode.  In protected mode, defer
 * to the full descriptor-checking helper.
 */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif