target-i386/seg_helper.c
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

//#define DEBUG_PCALL

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
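/*
 * Selector layout, as used by load_segment() above and throughout this
 * file: bits 15..3 are the descriptor index (hence the "& ~7" byte offset),
 * bit 2 is TI (0 = GDT, 1 = LDT) and bits 1..0 are the RPL.  For example,
 * selector 0x000f addresses descriptor 1 of the LDT with RPL 3.
 */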
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
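/* Illustrative sketch, kept under "#if 0" like the other debug-only
   snippets in this file so it is never compiled: decoding a textbook flat
   4 GiB ring-0 code descriptor with the helpers above.  The descriptor
   value is an example chosen here, not something used by this file. */
#if 0
static void seg_decode_example(void)
{
    uint32_t e1 = 0x0000ffff;   /* limit 15..0 = 0xffff, base 15..0 = 0 */
    uint32_t e2 = 0x00cf9a00;   /* G=1, D/B=1, limit 19..16 = 0xf,
                                   P=1, DPL=0, type = code read/exec */
    uint32_t base = get_seg_base(e1, e2);    /* -> 0x00000000 */
    uint32_t limit = get_seg_limit(e1, e2);  /* -> 0xffffffff (page granular) */

    (void)base;
    (void)limit;
}
#endif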
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
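/*
 * The (dpl * 4 + 2) << shift indexing above matches the architectural TSS
 * layouts: in a 16-bit TSS (shift 0) SP0/SS0 sit at offsets 2/4 and SP1/SS1
 * at 6/8, while in a 32-bit TSS (shift 1) ESP0/SS0 sit at 4/8 and ESP1/SS1
 * at 12/16, so dpl picks the inner stack used on a privilege change.
 */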
/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
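/*
 * Summary of how switch_tss() below treats the three sources:
 *   SWITCH_TSS_JMP:  clear the busy bit of the old TSS, set it on the new.
 *   SWITCH_TSS_CALL: leave the old TSS busy, set the new one busy, store
 *                    the old TR selector in the new TSS back-link and set
 *                    NT in the loaded EFLAGS.
 *   SWITCH_TSS_IRET: clear the busy bit of the old TSS and clear NT in the
 *                    EFLAGS image saved back to it.
 */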
201 /* XXX: restore CPU state in registers (PowerPC case) */
202 static void switch_tss(CPUX86State *env, int tss_selector,
203 uint32_t e1, uint32_t e2, int source,
204 uint32_t next_eip)
206 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
207 target_ulong tss_base;
208 uint32_t new_regs[8], new_segs[6];
209 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
210 uint32_t old_eflags, eflags_mask;
211 SegmentCache *dt;
212 int index;
213 target_ulong ptr;
215 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
216 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
217 source);
219 /* if task gate, we read the TSS segment and we load it */
220 if (type == 5) {
221 if (!(e2 & DESC_P_MASK)) {
222 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
224 tss_selector = e1 >> 16;
225 if (tss_selector & 4) {
226 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
228 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
229 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
231 if (e2 & DESC_S_MASK) {
232 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
234 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
235 if ((type & 7) != 1) {
236 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
240 if (!(e2 & DESC_P_MASK)) {
241 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
244 if (type & 8) {
245 tss_limit_max = 103;
246 } else {
247 tss_limit_max = 43;
249 tss_limit = get_seg_limit(e1, e2);
250 tss_base = get_seg_base(e1, e2);
251 if ((tss_selector & 4) != 0 ||
252 tss_limit < tss_limit_max) {
253 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
255 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
256 if (old_type & 8) {
257 old_tss_limit_max = 103;
258 } else {
259 old_tss_limit_max = 43;
262 /* read all the registers from the new TSS */
263 if (type & 8) {
264 /* 32 bit */
265 new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
266 new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
267 new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
268 for (i = 0; i < 8; i++) {
269 new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
271 for (i = 0; i < 6; i++) {
272 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
274 new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
275 new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
276 } else {
277 /* 16 bit */
278 new_cr3 = 0;
279 new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
280 new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
281 for (i = 0; i < 8; i++) {
282 new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
283 0xffff0000;
285 for (i = 0; i < 4; i++) {
286 new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
288 new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
289 new_segs[R_FS] = 0;
290 new_segs[R_GS] = 0;
291 new_trap = 0;
293 /* XXX: avoid a compiler warning, see
294 http://support.amd.com/us/Processor_TechDocs/24593.pdf
295 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
296 (void)new_trap;
298 /* NOTE: we must avoid memory exceptions during the task switch,
299 so we make dummy accesses before */
300 /* XXX: it can still fail in some cases, so a bigger hack is
301 necessary to valid the TLB after having done the accesses */
303 v1 = cpu_ldub_kernel(env, env->tr.base);
304 v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
305 cpu_stb_kernel(env, env->tr.base, v1);
306 cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);
308 /* clear busy bit (it is restartable) */
309 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
310 target_ulong ptr;
311 uint32_t e2;
313 ptr = env->gdt.base + (env->tr.selector & ~7);
314 e2 = cpu_ldl_kernel(env, ptr + 4);
315 e2 &= ~DESC_TSS_BUSY_MASK;
316 cpu_stl_kernel(env, ptr + 4, e2);
318 old_eflags = cpu_compute_eflags(env);
319 if (source == SWITCH_TSS_IRET) {
320 old_eflags &= ~NT_MASK;
323 /* save the current state in the old TSS */
324 if (type & 8) {
325 /* 32 bit */
326 cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
327 cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
328 cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
329 cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
330 cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
331 cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
332 cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
333 cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
334 cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
335 cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
336 for (i = 0; i < 6; i++) {
337 cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
338 env->segs[i].selector);
340 } else {
341 /* 16 bit */
342 cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
343 cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
344 cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
345 cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
346 cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
347 cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
348 cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
349 cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
350 cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
351 cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
352 for (i = 0; i < 4; i++) {
353 cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
354 env->segs[i].selector);
358 /* now if an exception occurs, it will occurs in the next task
359 context */
361 if (source == SWITCH_TSS_CALL) {
362 cpu_stw_kernel(env, tss_base, env->tr.selector);
363 new_eflags |= NT_MASK;
366 /* set busy bit */
367 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
368 target_ulong ptr;
369 uint32_t e2;
371 ptr = env->gdt.base + (tss_selector & ~7);
372 e2 = cpu_ldl_kernel(env, ptr + 4);
373 e2 |= DESC_TSS_BUSY_MASK;
374 cpu_stl_kernel(env, ptr + 4, e2);
377 /* set the new CPU state */
378 /* from this point, any exception which occurs can give problems */
379 env->cr[0] |= CR0_TS_MASK;
380 env->hflags |= HF_TS_MASK;
381 env->tr.selector = tss_selector;
382 env->tr.base = tss_base;
383 env->tr.limit = tss_limit;
384 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
386 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
387 cpu_x86_update_cr3(env, new_cr3);
390 /* load all registers without an exception, then reload them with
391 possible exception */
392 env->eip = new_eip;
393 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
394 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
395 if (!(type & 8)) {
396 eflags_mask &= 0xffff;
398 cpu_load_eflags(env, new_eflags, eflags_mask);
399 /* XXX: what to do in 16 bit case? */
400 env->regs[R_EAX] = new_regs[0];
401 env->regs[R_ECX] = new_regs[1];
402 env->regs[R_EDX] = new_regs[2];
403 env->regs[R_EBX] = new_regs[3];
404 env->regs[R_ESP] = new_regs[4];
405 env->regs[R_EBP] = new_regs[5];
406 env->regs[R_ESI] = new_regs[6];
407 env->regs[R_EDI] = new_regs[7];
408 if (new_eflags & VM_MASK) {
409 for (i = 0; i < 6; i++) {
410 load_seg_vm(env, i, new_segs[i]);
412 /* in vm86, CPL is always 3 */
413 cpu_x86_set_cpl(env, 3);
414 } else {
415 /* CPL is set the RPL of CS */
416 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
417 /* first just selectors as the rest may trigger exceptions */
418 for (i = 0; i < 6; i++) {
419 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
423 env->ldt.selector = new_ldt & ~4;
424 env->ldt.base = 0;
425 env->ldt.limit = 0;
426 env->ldt.flags = 0;
428 /* load the LDT */
429 if (new_ldt & 4) {
430 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
433 if ((new_ldt & 0xfffc) != 0) {
434 dt = &env->gdt;
435 index = new_ldt & ~7;
436 if ((index + 7) > dt->limit) {
437 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
439 ptr = dt->base + index;
440 e1 = cpu_ldl_kernel(env, ptr);
441 e2 = cpu_ldl_kernel(env, ptr + 4);
442 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
443 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
445 if (!(e2 & DESC_P_MASK)) {
446 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
448 load_seg_cache_raw_dt(&env->ldt, e1, e2);
451 /* load the segments */
452 if (!(new_eflags & VM_MASK)) {
453 tss_load_seg(env, R_CS, new_segs[R_CS]);
454 tss_load_seg(env, R_SS, new_segs[R_SS]);
455 tss_load_seg(env, R_ES, new_segs[R_ES]);
456 tss_load_seg(env, R_DS, new_segs[R_DS]);
457 tss_load_seg(env, R_FS, new_segs[R_FS]);
458 tss_load_seg(env, R_GS, new_segs[R_GS]);
461 /* check that env->eip is in the CS segment limits */
462 if (new_eip > env->segs[R_CS].limit) {
463 /* XXX: different exception if CALL? */
464 raise_exception_err(env, EXCP0D_GPF, 0);
467 #ifndef CONFIG_USER_ONLY
468 /* reset local breakpoints */
469 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
470 for (i = 0; i < DR7_MAX_BP; i++) {
471 if (hw_local_breakpoint_enabled(env->dr[7], i) &&
472 !hw_global_breakpoint_enabled(env->dr[7], i)) {
473 hw_breakpoint_remove(env, i);
476 env->dr[7] &= ~DR7_LOCAL_BP_MASK;
478 #endif
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
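/* i.e. #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17),
   the exception vectors that push an error code. */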
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
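/* Example of the overflow SEG_ADDL guards against: ssp = 0xffff0000 with
   sp = 0x00010008 sums to 0x100010008 in a 64-bit target_ulong; the
   (uint32_t) cast wraps it back to 0x00010008, which is what a 32-bit
   guest expects. */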
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
554 /* protected mode interrupt */
555 static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
556 int error_code, unsigned int next_eip,
557 int is_hw)
559 SegmentCache *dt;
560 target_ulong ptr, ssp;
561 int type, dpl, selector, ss_dpl, cpl;
562 int has_error_code, new_stack, shift;
563 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
564 uint32_t old_eip, sp_mask;
566 has_error_code = 0;
567 if (!is_int && !is_hw) {
568 has_error_code = exception_has_error_code(intno);
570 if (is_int) {
571 old_eip = next_eip;
572 } else {
573 old_eip = env->eip;
576 dt = &env->idt;
577 if (intno * 8 + 7 > dt->limit) {
578 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
580 ptr = dt->base + intno * 8;
581 e1 = cpu_ldl_kernel(env, ptr);
582 e2 = cpu_ldl_kernel(env, ptr + 4);
583 /* check gate type */
584 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
585 switch (type) {
586 case 5: /* task gate */
587 /* must do that check here to return the correct error code */
588 if (!(e2 & DESC_P_MASK)) {
589 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
591 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
592 if (has_error_code) {
593 int type;
594 uint32_t mask;
596 /* push the error code */
597 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
598 shift = type >> 3;
599 if (env->segs[R_SS].flags & DESC_B_MASK) {
600 mask = 0xffffffff;
601 } else {
602 mask = 0xffff;
604 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
605 ssp = env->segs[R_SS].base + esp;
606 if (shift) {
607 cpu_stl_kernel(env, ssp, error_code);
608 } else {
609 cpu_stw_kernel(env, ssp, error_code);
611 SET_ESP(esp, mask);
613 return;
614 case 6: /* 286 interrupt gate */
615 case 7: /* 286 trap gate */
616 case 14: /* 386 interrupt gate */
617 case 15: /* 386 trap gate */
618 break;
619 default:
620 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
621 break;
623 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
624 cpl = env->hflags & HF_CPL_MASK;
625 /* check privilege if software int */
626 if (is_int && dpl < cpl) {
627 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
629 /* check valid bit */
630 if (!(e2 & DESC_P_MASK)) {
631 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
633 selector = e1 >> 16;
634 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
635 if ((selector & 0xfffc) == 0) {
636 raise_exception_err(env, EXCP0D_GPF, 0);
638 if (load_segment(env, &e1, &e2, selector) != 0) {
639 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
641 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
642 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
644 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
645 if (dpl > cpl) {
646 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
648 if (!(e2 & DESC_P_MASK)) {
649 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
651 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
652 /* to inner privilege */
653 get_ss_esp_from_tss(env, &ss, &esp, dpl);
654 if ((ss & 0xfffc) == 0) {
655 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
657 if ((ss & 3) != dpl) {
658 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
660 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
661 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
663 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
664 if (ss_dpl != dpl) {
665 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
667 if (!(ss_e2 & DESC_S_MASK) ||
668 (ss_e2 & DESC_CS_MASK) ||
669 !(ss_e2 & DESC_W_MASK)) {
670 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
672 if (!(ss_e2 & DESC_P_MASK)) {
673 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
675 new_stack = 1;
676 sp_mask = get_sp_mask(ss_e2);
677 ssp = get_seg_base(ss_e1, ss_e2);
678 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
679 /* to same privilege */
680 if (env->eflags & VM_MASK) {
681 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
683 new_stack = 0;
684 sp_mask = get_sp_mask(env->segs[R_SS].flags);
685 ssp = env->segs[R_SS].base;
686 esp = env->regs[R_ESP];
687 dpl = cpl;
688 } else {
689 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
690 new_stack = 0; /* avoid warning */
691 sp_mask = 0; /* avoid warning */
692 ssp = 0; /* avoid warning */
693 esp = 0; /* avoid warning */
696 shift = type >> 3;
698 #if 0
699 /* XXX: check that enough room is available */
700 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
701 if (env->eflags & VM_MASK) {
702 push_size += 8;
704 push_size <<= shift;
705 #endif
706 if (shift == 1) {
707 if (new_stack) {
708 if (env->eflags & VM_MASK) {
709 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
710 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
711 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
712 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
714 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
715 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
717 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
718 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
719 PUSHL(ssp, esp, sp_mask, old_eip);
720 if (has_error_code) {
721 PUSHL(ssp, esp, sp_mask, error_code);
723 } else {
724 if (new_stack) {
725 if (env->eflags & VM_MASK) {
726 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
727 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
728 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
729 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
731 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
732 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
734 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
735 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
736 PUSHW(ssp, esp, sp_mask, old_eip);
737 if (has_error_code) {
738 PUSHW(ssp, esp, sp_mask, error_code);
742 if (new_stack) {
743 if (env->eflags & VM_MASK) {
744 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
745 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
746 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
747 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
749 ss = (ss & ~3) | dpl;
750 cpu_x86_load_seg_cache(env, R_SS, ss,
751 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
753 SET_ESP(esp, sp_mask);
755 selector = (selector & ~3) | dpl;
756 cpu_x86_load_seg_cache(env, R_CS, selector,
757 get_seg_base(e1, e2),
758 get_seg_limit(e1, e2),
759 e2);
760 cpu_x86_set_cpl(env, dpl);
761 env->eip = offset;
763 /* interrupt gate clear IF mask */
764 if ((type & 1) == 0) {
765 env->eflags &= ~IF_MASK;
767 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }

static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
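/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4/12/20 and IST1..IST7 at
   offsets 36..84, which is why callers pass either the target DPL (levels
   0-2) or "ist + 3" (levels 4-10) as the level argument above. */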
804 /* 64 bit interrupt */
805 static void do_interrupt64(CPUX86State *env, int intno, int is_int,
806 int error_code, target_ulong next_eip, int is_hw)
808 SegmentCache *dt;
809 target_ulong ptr;
810 int type, dpl, selector, cpl, ist;
811 int has_error_code, new_stack;
812 uint32_t e1, e2, e3, ss;
813 target_ulong old_eip, esp, offset;
815 has_error_code = 0;
816 if (!is_int && !is_hw) {
817 has_error_code = exception_has_error_code(intno);
819 if (is_int) {
820 old_eip = next_eip;
821 } else {
822 old_eip = env->eip;
825 dt = &env->idt;
826 if (intno * 16 + 15 > dt->limit) {
827 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
829 ptr = dt->base + intno * 16;
830 e1 = cpu_ldl_kernel(env, ptr);
831 e2 = cpu_ldl_kernel(env, ptr + 4);
832 e3 = cpu_ldl_kernel(env, ptr + 8);
833 /* check gate type */
834 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
835 switch (type) {
836 case 14: /* 386 interrupt gate */
837 case 15: /* 386 trap gate */
838 break;
839 default:
840 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
841 break;
843 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
844 cpl = env->hflags & HF_CPL_MASK;
845 /* check privilege if software int */
846 if (is_int && dpl < cpl) {
847 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
849 /* check valid bit */
850 if (!(e2 & DESC_P_MASK)) {
851 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
853 selector = e1 >> 16;
854 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
855 ist = e2 & 7;
856 if ((selector & 0xfffc) == 0) {
857 raise_exception_err(env, EXCP0D_GPF, 0);
860 if (load_segment(env, &e1, &e2, selector) != 0) {
861 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
863 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
864 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
866 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
867 if (dpl > cpl) {
868 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
870 if (!(e2 & DESC_P_MASK)) {
871 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
873 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
874 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
876 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
877 /* to inner privilege */
878 if (ist != 0) {
879 esp = get_rsp_from_tss(env, ist + 3);
880 } else {
881 esp = get_rsp_from_tss(env, dpl);
883 esp &= ~0xfLL; /* align stack */
884 ss = 0;
885 new_stack = 1;
886 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
887 /* to same privilege */
888 if (env->eflags & VM_MASK) {
889 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
891 new_stack = 0;
892 if (ist != 0) {
893 esp = get_rsp_from_tss(env, ist + 3);
894 } else {
895 esp = env->regs[R_ESP];
897 esp &= ~0xfLL; /* align stack */
898 dpl = cpl;
899 } else {
900 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
901 new_stack = 0; /* avoid warning */
902 esp = 0; /* avoid warning */
905 PUSHQ(esp, env->segs[R_SS].selector);
906 PUSHQ(esp, env->regs[R_ESP]);
907 PUSHQ(esp, cpu_compute_eflags(env));
908 PUSHQ(esp, env->segs[R_CS].selector);
909 PUSHQ(esp, old_eip);
910 if (has_error_code) {
911 PUSHQ(esp, error_code);
914 if (new_stack) {
915 ss = 0 | dpl;
916 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
918 env->regs[R_ESP] = esp;
920 selector = (selector & ~3) | dpl;
921 cpu_x86_load_seg_cache(env, R_CS, selector,
922 get_seg_base(e1, e2),
923 get_seg_limit(e1, e2),
924 e2);
925 cpu_x86_set_cpl(env, dpl);
926 env->eip = offset;
928 /* interrupt gate clear IF mask */
929 if ((type & 1) == 0) {
930 env->eflags &= ~IF_MASK;
932 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
934 #endif
936 #ifdef TARGET_X86_64
937 #if defined(CONFIG_USER_ONLY)
938 void helper_syscall(CPUX86State *env, int next_eip_addend)
940 CPUState *cs = CPU(x86_env_get_cpu(env));
942 cs->exception_index = EXCP_SYSCALL;
943 env->exception_next_eip = env->eip + next_eip_addend;
944 cpu_loop_exit(cs);
946 #else
947 void helper_syscall(CPUX86State *env, int next_eip_addend)
949 int selector;
951 if (!(env->efer & MSR_EFER_SCE)) {
952 raise_exception_err(env, EXCP06_ILLOP, 0);
954 selector = (env->star >> 32) & 0xffff;
955 if (env->hflags & HF_LMA_MASK) {
956 int code64;
958 env->regs[R_ECX] = env->eip + next_eip_addend;
959 env->regs[11] = cpu_compute_eflags(env);
961 code64 = env->hflags & HF_CS64_MASK;
963 cpu_x86_set_cpl(env, 0);
964 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
965 0, 0xffffffff,
966 DESC_G_MASK | DESC_P_MASK |
967 DESC_S_MASK |
968 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
969 DESC_L_MASK);
970 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
971 0, 0xffffffff,
972 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
973 DESC_S_MASK |
974 DESC_W_MASK | DESC_A_MASK);
975 env->eflags &= ~env->fmask;
976 cpu_load_eflags(env, env->eflags, 0);
977 if (code64) {
978 env->eip = env->lstar;
979 } else {
980 env->eip = env->cstar;
982 } else {
983 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
985 cpu_x86_set_cpl(env, 0);
986 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
987 0, 0xffffffff,
988 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
989 DESC_S_MASK |
990 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
991 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
992 0, 0xffffffff,
993 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
994 DESC_S_MASK |
995 DESC_W_MASK | DESC_A_MASK);
996 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
997 env->eip = (uint32_t)env->star;
1000 #endif
1001 #endif
1003 #ifdef TARGET_X86_64
1004 void helper_sysret(CPUX86State *env, int dflag)
1006 int cpl, selector;
1008 if (!(env->efer & MSR_EFER_SCE)) {
1009 raise_exception_err(env, EXCP06_ILLOP, 0);
1011 cpl = env->hflags & HF_CPL_MASK;
1012 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1013 raise_exception_err(env, EXCP0D_GPF, 0);
1015 selector = (env->star >> 48) & 0xffff;
1016 if (env->hflags & HF_LMA_MASK) {
1017 if (dflag == 2) {
1018 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1019 0, 0xffffffff,
1020 DESC_G_MASK | DESC_P_MASK |
1021 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1022 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1023 DESC_L_MASK);
1024 env->eip = env->regs[R_ECX];
1025 } else {
1026 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1027 0, 0xffffffff,
1028 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1029 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1030 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1031 env->eip = (uint32_t)env->regs[R_ECX];
1033 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1034 0, 0xffffffff,
1035 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1036 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1037 DESC_W_MASK | DESC_A_MASK);
1038 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1039 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1040 NT_MASK);
1041 cpu_x86_set_cpl(env, 3);
1042 } else {
1043 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1044 0, 0xffffffff,
1045 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1046 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1047 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1048 env->eip = (uint32_t)env->regs[R_ECX];
1049 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1050 0, 0xffffffff,
1051 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1052 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1053 DESC_W_MASK | DESC_A_MASK);
1054 env->eflags |= IF_MASK;
1055 cpu_x86_set_cpl(env, 3);
1058 #endif
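/* Both helpers above take their selectors from the STAR MSR: SYSCALL uses
   STAR[47:32] for CS and that value + 8 for SS; SYSRET uses STAR[63:48]
   for CS (or + 16 when returning to 64-bit code) and again + 8 for SS,
   forcing RPL 3 on the returned CS. */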
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
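/* Real-mode vectors are 4 bytes each (16-bit offset, then segment), so
   vector n is fetched from IDT base + n * 4; e.g. INT 0x10 reads linear
   address 0x40 while the IDT base still has its reset value of 0. */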
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}
1133 #else
1135 static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1136 int error_code, int is_hw, int rm)
1138 CPUState *cs = CPU(x86_env_get_cpu(env));
1139 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1140 control.event_inj));
1142 if (!(event_inj & SVM_EVTINJ_VALID)) {
1143 int type;
1145 if (is_int) {
1146 type = SVM_EVTINJ_TYPE_SOFT;
1147 } else {
1148 type = SVM_EVTINJ_TYPE_EXEPT;
1150 event_inj = intno | type | SVM_EVTINJ_VALID;
1151 if (!rm && exception_has_error_code(intno)) {
1152 event_inj |= SVM_EVTINJ_VALID_ERR;
1153 stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
1154 control.event_inj_err),
1155 error_code);
1157 stl_phys(cs->as,
1158 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1159 event_inj);
1162 #endif
1165 * Begin execution of an interruption. is_int is TRUE if coming from
1166 * the int instruction. next_eip is the env->eip value AFTER the interrupt
1167 * instruction. It is only relevant if is_int is TRUE.
1169 static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
1170 int error_code, target_ulong next_eip, int is_hw)
1172 CPUX86State *env = &cpu->env;
1174 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1175 if ((env->cr[0] & CR0_PE_MASK)) {
1176 static int count;
1178 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1179 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1180 count, intno, error_code, is_int,
1181 env->hflags & HF_CPL_MASK,
1182 env->segs[R_CS].selector, env->eip,
1183 (int)env->segs[R_CS].base + env->eip,
1184 env->segs[R_SS].selector, env->regs[R_ESP]);
1185 if (intno == 0x0e) {
1186 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1187 } else {
1188 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
1190 qemu_log("\n");
1191 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
1192 #if 0
1194 int i;
1195 target_ulong ptr;
1197 qemu_log(" code=");
1198 ptr = env->segs[R_CS].base + env->eip;
1199 for (i = 0; i < 16; i++) {
1200 qemu_log(" %02x", ldub(ptr + i));
1202 qemu_log("\n");
1204 #endif
1205 count++;
1208 if (env->cr[0] & CR0_PE_MASK) {
1209 #if !defined(CONFIG_USER_ONLY)
1210 if (env->hflags & HF_SVMI_MASK) {
1211 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
1213 #endif
1214 #ifdef TARGET_X86_64
1215 if (env->hflags & HF_LMA_MASK) {
1216 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
1217 } else
1218 #endif
1220 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1221 is_hw);
1223 } else {
1224 #if !defined(CONFIG_USER_ONLY)
1225 if (env->hflags & HF_SVMI_MASK) {
1226 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
1228 #endif
1229 do_interrupt_real(env, intno, is_int, error_code, next_eip);
1232 #if !defined(CONFIG_USER_ONLY)
1233 if (env->hflags & HF_SVMI_MASK) {
1234 CPUState *cs = CPU(cpu);
1235 uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
1236 offsetof(struct vmcb,
1237 control.event_inj));
1239 stl_phys(cs->as,
1240 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1241 event_inj & ~SVM_EVTINJ_VALID);
1243 #endif
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
1279 void helper_enter_level(CPUX86State *env, int level, int data32,
1280 target_ulong t1)
1282 target_ulong ssp;
1283 uint32_t esp_mask, esp, ebp;
1285 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1286 ssp = env->segs[R_SS].base;
1287 ebp = env->regs[R_EBP];
1288 esp = env->regs[R_ESP];
1289 if (data32) {
1290 /* 32 bit */
1291 esp -= 4;
1292 while (--level) {
1293 esp -= 4;
1294 ebp -= 4;
1295 cpu_stl_data(env, ssp + (esp & esp_mask),
1296 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
1298 esp -= 4;
1299 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
1300 } else {
1301 /* 16 bit */
1302 esp -= 2;
1303 while (--level) {
1304 esp -= 2;
1305 ebp -= 2;
1306 cpu_stw_data(env, ssp + (esp & esp_mask),
1307 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
1309 esp -= 2;
1310 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
1314 #ifdef TARGET_X86_64
1315 void helper_enter64_level(CPUX86State *env, int level, int data64,
1316 target_ulong t1)
1318 target_ulong esp, ebp;
1320 ebp = env->regs[R_EBP];
1321 esp = env->regs[R_ESP];
1323 if (data64) {
1324 /* 64 bit */
1325 esp -= 8;
1326 while (--level) {
1327 esp -= 8;
1328 ebp -= 8;
1329 cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
1331 esp -= 8;
1332 cpu_stq_data(env, esp, t1);
1333 } else {
1334 /* 16 bit */
1335 esp -= 2;
1336 while (--level) {
1337 esp -= 2;
1338 ebp -= 2;
1339 cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
1341 esp -= 2;
1342 cpu_stw_data(env, esp, t1);
1345 #endif
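/* Both ENTER helpers above implement the instruction's nesting level: for
   level > 1 they copy the enclosing frame pointers (the "display") from
   the old frame onto the new stack before pushing the new frame pointer
   t1 supplied by the translated code. */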
1347 void helper_lldt(CPUX86State *env, int selector)
1349 SegmentCache *dt;
1350 uint32_t e1, e2;
1351 int index, entry_limit;
1352 target_ulong ptr;
1354 selector &= 0xffff;
1355 if ((selector & 0xfffc) == 0) {
1356 /* XXX: NULL selector case: invalid LDT */
1357 env->ldt.base = 0;
1358 env->ldt.limit = 0;
1359 } else {
1360 if (selector & 0x4) {
1361 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1363 dt = &env->gdt;
1364 index = selector & ~7;
1365 #ifdef TARGET_X86_64
1366 if (env->hflags & HF_LMA_MASK) {
1367 entry_limit = 15;
1368 } else
1369 #endif
1371 entry_limit = 7;
1373 if ((index + entry_limit) > dt->limit) {
1374 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1376 ptr = dt->base + index;
1377 e1 = cpu_ldl_kernel(env, ptr);
1378 e2 = cpu_ldl_kernel(env, ptr + 4);
1379 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1380 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1382 if (!(e2 & DESC_P_MASK)) {
1383 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1385 #ifdef TARGET_X86_64
1386 if (env->hflags & HF_LMA_MASK) {
1387 uint32_t e3;
1389 e3 = cpu_ldl_kernel(env, ptr + 8);
1390 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1391 env->ldt.base |= (target_ulong)e3 << 32;
1392 } else
1393 #endif
1395 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1398 env->ldt.selector = selector;
1401 void helper_ltr(CPUX86State *env, int selector)
1403 SegmentCache *dt;
1404 uint32_t e1, e2;
1405 int index, type, entry_limit;
1406 target_ulong ptr;
1408 selector &= 0xffff;
1409 if ((selector & 0xfffc) == 0) {
1410 /* NULL selector case: invalid TR */
1411 env->tr.base = 0;
1412 env->tr.limit = 0;
1413 env->tr.flags = 0;
1414 } else {
1415 if (selector & 0x4) {
1416 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1418 dt = &env->gdt;
1419 index = selector & ~7;
1420 #ifdef TARGET_X86_64
1421 if (env->hflags & HF_LMA_MASK) {
1422 entry_limit = 15;
1423 } else
1424 #endif
1426 entry_limit = 7;
1428 if ((index + entry_limit) > dt->limit) {
1429 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1431 ptr = dt->base + index;
1432 e1 = cpu_ldl_kernel(env, ptr);
1433 e2 = cpu_ldl_kernel(env, ptr + 4);
1434 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1435 if ((e2 & DESC_S_MASK) ||
1436 (type != 1 && type != 9)) {
1437 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1439 if (!(e2 & DESC_P_MASK)) {
1440 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1442 #ifdef TARGET_X86_64
1443 if (env->hflags & HF_LMA_MASK) {
1444 uint32_t e3, e4;
1446 e3 = cpu_ldl_kernel(env, ptr + 8);
1447 e4 = cpu_ldl_kernel(env, ptr + 12);
1448 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1449 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1451 load_seg_cache_raw_dt(&env->tr, e1, e2);
1452 env->tr.base |= (target_ulong)e3 << 32;
1453 } else
1454 #endif
1456 load_seg_cache_raw_dt(&env->tr, e1, e2);
1458 e2 |= DESC_TSS_BUSY_MASK;
1459 cpu_stl_kernel(env, ptr + 4, e2);
1461 env->tr.selector = selector;
1464 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1465 void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
1467 uint32_t e1, e2;
1468 int cpl, dpl, rpl;
1469 SegmentCache *dt;
1470 int index;
1471 target_ulong ptr;
1473 selector &= 0xffff;
1474 cpl = env->hflags & HF_CPL_MASK;
1475 if ((selector & 0xfffc) == 0) {
1476 /* null selector case */
1477 if (seg_reg == R_SS
1478 #ifdef TARGET_X86_64
1479 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1480 #endif
1482 raise_exception_err(env, EXCP0D_GPF, 0);
1484 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1485 } else {
1487 if (selector & 0x4) {
1488 dt = &env->ldt;
1489 } else {
1490 dt = &env->gdt;
1492 index = selector & ~7;
1493 if ((index + 7) > dt->limit) {
1494 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1496 ptr = dt->base + index;
1497 e1 = cpu_ldl_kernel(env, ptr);
1498 e2 = cpu_ldl_kernel(env, ptr + 4);
1500 if (!(e2 & DESC_S_MASK)) {
1501 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1503 rpl = selector & 3;
1504 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1505 if (seg_reg == R_SS) {
1506 /* must be writable segment */
1507 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1508 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1510 if (rpl != cpl || dpl != cpl) {
1511 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1513 } else {
1514 /* must be readable segment */
1515 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1516 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1519 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1520 /* if not conforming code, test rights */
1521 if (dpl < cpl || dpl < rpl) {
1522 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1527 if (!(e2 & DESC_P_MASK)) {
1528 if (seg_reg == R_SS) {
1529 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1530 } else {
1531 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1535 /* set the access bit if not already set */
1536 if (!(e2 & DESC_A_MASK)) {
1537 e2 |= DESC_A_MASK;
1538 cpu_stl_kernel(env, ptr + 4, e2);
1541 cpu_x86_load_seg_cache(env, seg_reg, selector,
1542 get_seg_base(e1, e2),
1543 get_seg_limit(e1, e2),
1544 e2);
1545 #if 0
1546 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1547 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1548 #endif
1552 /* protected mode jump */
1553 void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1554 int next_eip_addend)
1556 int gate_cs, type;
1557 uint32_t e1, e2, cpl, dpl, rpl, limit;
1558 target_ulong next_eip;
1560 if ((new_cs & 0xfffc) == 0) {
1561 raise_exception_err(env, EXCP0D_GPF, 0);
1563 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1564 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1566 cpl = env->hflags & HF_CPL_MASK;
1567 if (e2 & DESC_S_MASK) {
1568 if (!(e2 & DESC_CS_MASK)) {
1569 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1571 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1572 if (e2 & DESC_C_MASK) {
1573 /* conforming code segment */
1574 if (dpl > cpl) {
1575 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1577 } else {
1578 /* non conforming code segment */
1579 rpl = new_cs & 3;
1580 if (rpl > cpl) {
1581 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1583 if (dpl != cpl) {
1584 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1587 if (!(e2 & DESC_P_MASK)) {
1588 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1590 limit = get_seg_limit(e1, e2);
1591 if (new_eip > limit &&
1592 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
1593 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1595 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1596 get_seg_base(e1, e2), limit, e2);
1597 env->eip = new_eip;
1598 } else {
1599 /* jump to call or task gate */
1600 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1601 rpl = new_cs & 3;
1602 cpl = env->hflags & HF_CPL_MASK;
1603 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1604 switch (type) {
1605 case 1: /* 286 TSS */
1606 case 9: /* 386 TSS */
1607 case 5: /* task gate */
1608 if (dpl < cpl || dpl < rpl) {
1609 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1611 next_eip = env->eip + next_eip_addend;
1612 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
1613 CC_OP = CC_OP_EFLAGS;
1614 break;
1615 case 4: /* 286 call gate */
1616 case 12: /* 386 call gate */
1617 if ((dpl < cpl) || (dpl < rpl)) {
1618 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1620 if (!(e2 & DESC_P_MASK)) {
1621 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1623 gate_cs = e1 >> 16;
1624 new_eip = (e1 & 0xffff);
1625 if (type == 12) {
1626 new_eip |= (e2 & 0xffff0000);
1628 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
1629 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1631 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1632 /* must be code segment */
1633 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1634 (DESC_S_MASK | DESC_CS_MASK))) {
1635 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1637 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1638 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
1639 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1641 if (!(e2 & DESC_P_MASK)) {
1642 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
1644 limit = get_seg_limit(e1, e2);
1645 if (new_eip > limit) {
1646 raise_exception_err(env, EXCP0D_GPF, 0);
1648 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1649 get_seg_base(e1, e2), limit, e2);
1650 env->eip = new_eip;
1651 break;
1652 default:
1653 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1654 break;
1659 /* real mode call */
1660 void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
1661 int shift, int next_eip)
1663 int new_eip;
1664 uint32_t esp, esp_mask;
1665 target_ulong ssp;
1667 new_eip = new_eip1;
1668 esp = env->regs[R_ESP];
1669 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1670 ssp = env->segs[R_SS].base;
1671 if (shift) {
1672 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1673 PUSHL(ssp, esp, esp_mask, next_eip);
1674 } else {
1675 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1676 PUSHW(ssp, esp, esp_mask, next_eip);
1679 SET_ESP(esp, esp_mask);
1680 env->eip = new_eip;
1681 env->segs[R_CS].selector = new_cs;
1682 env->segs[R_CS].base = (new_cs << 4);
1685 /* protected mode call */
1686 void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
1687 int shift, int next_eip_addend)
1689 int new_stack, i;
1690 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1691 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
1692 uint32_t val, limit, old_sp_mask;
1693 target_ulong ssp, old_ssp, next_eip;
1695 next_eip = env->eip + next_eip_addend;
1696 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
1697 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
1698 if ((new_cs & 0xfffc) == 0) {
1699 raise_exception_err(env, EXCP0D_GPF, 0);
1701 if (load_segment(env, &e1, &e2, new_cs) != 0) {
1702 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1704 cpl = env->hflags & HF_CPL_MASK;
1705 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
1706 if (e2 & DESC_S_MASK) {
1707 if (!(e2 & DESC_CS_MASK)) {
1708 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1710 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1711 if (e2 & DESC_C_MASK) {
1712 /* conforming code segment */
1713 if (dpl > cpl) {
1714 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1716 } else {
1717 /* non conforming code segment */
1718 rpl = new_cs & 3;
1719 if (rpl > cpl) {
1720 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1722 if (dpl != cpl) {
1723 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1726 if (!(e2 & DESC_P_MASK)) {
1727 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1730 #ifdef TARGET_X86_64
1731 /* XXX: check 16/32 bit cases in long mode */
1732 if (shift == 2) {
1733 target_ulong rsp;
1735 /* 64 bit case */
1736 rsp = env->regs[R_ESP];
1737 PUSHQ(rsp, env->segs[R_CS].selector);
1738 PUSHQ(rsp, next_eip);
1739 /* from this point, not restartable */
1740 env->regs[R_ESP] = rsp;
1741 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1742 get_seg_base(e1, e2),
1743 get_seg_limit(e1, e2), e2);
1744 env->eip = new_eip;
1745 } else
1746 #endif
1748 sp = env->regs[R_ESP];
1749 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1750 ssp = env->segs[R_SS].base;
1751 if (shift) {
1752 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1753 PUSHL(ssp, sp, sp_mask, next_eip);
1754 } else {
1755 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1756 PUSHW(ssp, sp, sp_mask, next_eip);
1759 limit = get_seg_limit(e1, e2);
1760 if (new_eip > limit) {
1761 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1763 /* from this point, not restartable */
1764 SET_ESP(sp, sp_mask);
1765 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1766 get_seg_base(e1, e2), limit, e2);
1767 env->eip = new_eip;
1769 } else {
1770 /* check gate type */
1771 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1772 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1773 rpl = new_cs & 3;
1774 switch (type) {
1775 case 1: /* available 286 TSS */
1776 case 9: /* available 386 TSS */
1777 case 5: /* task gate */
1778 if (dpl < cpl || dpl < rpl) {
1779 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1781 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
1782 CC_OP = CC_OP_EFLAGS;
1783 return;
1784 case 4: /* 286 call gate */
1785 case 12: /* 386 call gate */
1786 break;
1787 default:
1788 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1789 break;
1791 shift = type >> 3;
1793 if (dpl < cpl || dpl < rpl) {
1794 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
1796 /* check valid bit */
1797 if (!(e2 & DESC_P_MASK)) {
1798 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
1800 selector = e1 >> 16;
1801 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1802 param_count = e2 & 0x1f;
1803 if ((selector & 0xfffc) == 0) {
1804 raise_exception_err(env, EXCP0D_GPF, 0);
1807 if (load_segment(env, &e1, &e2, selector) != 0) {
1808 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1810 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
1811 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1813 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1814 if (dpl > cpl) {
1815 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1817 if (!(e2 & DESC_P_MASK)) {
1818 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1821 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1822 /* to inner privilege */
1823 get_ss_esp_from_tss(env, &ss, &sp, dpl);
1824 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1825 TARGET_FMT_lx "\n", ss, sp, param_count,
1826 env->regs[R_ESP]);
1827 if ((ss & 0xfffc) == 0) {
1828 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1830 if ((ss & 3) != dpl) {
1831 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1833 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
1834 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1836 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1837 if (ss_dpl != dpl) {
1838 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1840 if (!(ss_e2 & DESC_S_MASK) ||
1841 (ss_e2 & DESC_CS_MASK) ||
1842 !(ss_e2 & DESC_W_MASK)) {
1843 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1845 if (!(ss_e2 & DESC_P_MASK)) {
1846 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
1849 /* push_size = ((param_count * 2) + 8) << shift; */
1851 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1852 old_ssp = env->segs[R_SS].base;
1854 sp_mask = get_sp_mask(ss_e2);
1855 ssp = get_seg_base(ss_e1, ss_e2);
1856 if (shift) {
1857 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1858 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
1859 for (i = param_count - 1; i >= 0; i--) {
1860 val = cpu_ldl_kernel(env, old_ssp +
1861 ((env->regs[R_ESP] + i * 4) &
1862 old_sp_mask));
1863 PUSHL(ssp, sp, sp_mask, val);
1865 } else {
1866 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1867 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
1868 for (i = param_count - 1; i >= 0; i--) {
1869 val = cpu_lduw_kernel(env, old_ssp +
1870 ((env->regs[R_ESP] + i * 2) &
1871 old_sp_mask));
1872 PUSHW(ssp, sp, sp_mask, val);
1875 new_stack = 1;
1876 } else {
1877 /* to same privilege */
1878 sp = env->regs[R_ESP];
1879 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1880 ssp = env->segs[R_SS].base;
1881 /* push_size = (4 << shift); */
1882 new_stack = 0;
1885 if (shift) {
1886 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1887 PUSHL(ssp, sp, sp_mask, next_eip);
1888 } else {
1889 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1890 PUSHW(ssp, sp, sp_mask, next_eip);
1893 /* from this point, not restartable */
1895 if (new_stack) {
1896 ss = (ss & ~3) | dpl;
1897 cpu_x86_load_seg_cache(env, R_SS, ss,
1898 ssp,
1899 get_seg_limit(ss_e1, ss_e2),
1900 ss_e2);
1903 selector = (selector & ~3) | dpl;
1904 cpu_x86_load_seg_cache(env, R_CS, selector,
1905 get_seg_base(e1, e2),
1906 get_seg_limit(e1, e2),
1907 e2);
1908 cpu_x86_set_cpl(env, dpl);
1909 SET_ESP(sp, sp_mask);
1910 env->eip = offset;
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
1977 /* protected mode iret */
1978 static inline void helper_ret_protected(CPUX86State *env, int shift,
1979 int is_iret, int addend)
1981 uint32_t new_cs, new_eflags, new_ss;
1982 uint32_t new_es, new_ds, new_fs, new_gs;
1983 uint32_t e1, e2, ss_e1, ss_e2;
1984 int cpl, dpl, rpl, eflags_mask, iopl;
1985 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
1987 #ifdef TARGET_X86_64
1988 if (shift == 2) {
1989 sp_mask = -1;
1990 } else
1991 #endif
1993 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1995 sp = env->regs[R_ESP];
1996 ssp = env->segs[R_SS].base;
1997 new_eflags = 0; /* avoid warning */
1998 #ifdef TARGET_X86_64
1999 if (shift == 2) {
2000 POPQ(sp, new_eip);
2001 POPQ(sp, new_cs);
2002 new_cs &= 0xffff;
2003 if (is_iret) {
2004 POPQ(sp, new_eflags);
2005 }
2006 } else
2007 #endif
2008 {
2009 if (shift == 1) {
2010 /* 32 bits */
2011 POPL(ssp, sp, sp_mask, new_eip);
2012 POPL(ssp, sp, sp_mask, new_cs);
2013 new_cs &= 0xffff;
2014 if (is_iret) {
2015 POPL(ssp, sp, sp_mask, new_eflags);
2016 if (new_eflags & VM_MASK) {
2017 goto return_to_vm86;
2018 }
2019 }
2020 } else {
2021 /* 16 bits */
2022 POPW(ssp, sp, sp_mask, new_eip);
2023 POPW(ssp, sp, sp_mask, new_cs);
2024 if (is_iret) {
2025 POPW(ssp, sp, sp_mask, new_eflags);
2026 }
2027 }
2028 }
2029 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2030 new_cs, new_eip, shift, addend);
2031 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
2032 if ((new_cs & 0xfffc) == 0) {
2033 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2034 }
2035 if (load_segment(env, &e1, &e2, new_cs) != 0) {
2036 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2037 }
2038 if (!(e2 & DESC_S_MASK) ||
2039 !(e2 & DESC_CS_MASK)) {
2040 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2041 }
2042 cpl = env->hflags & HF_CPL_MASK;
2043 rpl = new_cs & 3;
2044 if (rpl < cpl) {
2045 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2046 }
2047 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2048 if (e2 & DESC_C_MASK) {
2049 if (dpl > rpl) {
2050 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2051 }
2052 } else {
2053 if (dpl != rpl) {
2054 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2055 }
2056 }
2057 if (!(e2 & DESC_P_MASK)) {
2058 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2059 }
2061 sp += addend;
2062 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2063 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2064 /* return to same privilege level */
2065 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2066 get_seg_base(e1, e2),
2067 get_seg_limit(e1, e2),
2068 e2);
2069 } else {
2070 /* return to different privilege level */
2071 #ifdef TARGET_X86_64
2072 if (shift == 2) {
2073 POPQ(sp, new_esp);
2074 POPQ(sp, new_ss);
2075 new_ss &= 0xffff;
2076 } else
2077 #endif
2078 {
2079 if (shift == 1) {
2080 /* 32 bits */
2081 POPL(ssp, sp, sp_mask, new_esp);
2082 POPL(ssp, sp, sp_mask, new_ss);
2083 new_ss &= 0xffff;
2084 } else {
2085 /* 16 bits */
2086 POPW(ssp, sp, sp_mask, new_esp);
2087 POPW(ssp, sp, sp_mask, new_ss);
2088 }
2089 }
2090 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2091 new_ss, new_esp);
2092 if ((new_ss & 0xfffc) == 0) {
2093 #ifdef TARGET_X86_64
2094 /* NULL ss is allowed in long mode if cpl != 3 */
2095 /* XXX: test CS64? */
2096 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2097 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2098 0, 0xffffffff,
2099 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2100 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2101 DESC_W_MASK | DESC_A_MASK);
2102 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2103 } else
2104 #endif
2105 {
2106 raise_exception_err(env, EXCP0D_GPF, 0);
2107 }
2108 } else {
2109 if ((new_ss & 3) != rpl) {
2110 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2111 }
2112 if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
2113 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2114 }
2115 if (!(ss_e2 & DESC_S_MASK) ||
2116 (ss_e2 & DESC_CS_MASK) ||
2117 !(ss_e2 & DESC_W_MASK)) {
2118 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2119 }
2120 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2121 if (dpl != rpl) {
2122 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2123 }
2124 if (!(ss_e2 & DESC_P_MASK)) {
2125 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2126 }
2127 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2128 get_seg_base(ss_e1, ss_e2),
2129 get_seg_limit(ss_e1, ss_e2),
2130 ss_e2);
2131 }
2133 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2134 get_seg_base(e1, e2),
2135 get_seg_limit(e1, e2),
2136 e2);
2137 cpu_x86_set_cpl(env, rpl);
2138 sp = new_esp;
2139 #ifdef TARGET_X86_64
2140 if (env->hflags & HF_CS64_MASK) {
2141 sp_mask = -1;
2142 } else
2143 #endif
2144 {
2145 sp_mask = get_sp_mask(ss_e2);
2146 }
2148 /* validate data segments */
2149 validate_seg(env, R_ES, rpl);
2150 validate_seg(env, R_DS, rpl);
2151 validate_seg(env, R_FS, rpl);
2152 validate_seg(env, R_GS, rpl);
2154 sp += addend;
2155 }
2156 SET_ESP(sp, sp_mask);
2157 env->eip = new_eip;
2158 if (is_iret) {
2159 /* NOTE: 'cpl' is the _old_ CPL */
2160 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2161 if (cpl == 0) {
2162 eflags_mask |= IOPL_MASK;
2163 }
2164 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2165 if (cpl <= iopl) {
2166 eflags_mask |= IF_MASK;
2167 }
2168 if (shift == 0) {
2169 eflags_mask &= 0xffff;
2170 }
2171 cpu_load_eflags(env, new_eflags, eflags_mask);
2172 }
2173 return;
2175 return_to_vm86:
2176 POPL(ssp, sp, sp_mask, new_esp);
2177 POPL(ssp, sp, sp_mask, new_ss);
2178 POPL(ssp, sp, sp_mask, new_es);
2179 POPL(ssp, sp, sp_mask, new_ds);
2180 POPL(ssp, sp, sp_mask, new_fs);
2181 POPL(ssp, sp, sp_mask, new_gs);
2183 /* modify processor state */
2184 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2185 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2186 VIP_MASK);
2187 load_seg_vm(env, R_CS, new_cs & 0xffff);
2188 cpu_x86_set_cpl(env, 3);
2189 load_seg_vm(env, R_SS, new_ss & 0xffff);
2190 load_seg_vm(env, R_ES, new_es & 0xffff);
2191 load_seg_vm(env, R_DS, new_ds & 0xffff);
2192 load_seg_vm(env, R_FS, new_fs & 0xffff);
2193 load_seg_vm(env, R_GS, new_gs & 0xffff);
2195 env->eip = new_eip & 0xffff;
2196 env->regs[R_ESP] = new_esp;
2197 }
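/*
 * IRET from protected mode.  With NT set this is a task return: the
 * selector of the previous task is read from the back-link field at
 * offset 0 of the current TSS and a task switch is performed (not
 * allowed in long mode).  Otherwise it is a plain stack-based return
 * handled by helper_ret_protected().  Either way the NMI blocking
 * state is cleared.
 */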
2199 void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
2200 {
2201 int tss_selector, type;
2202 uint32_t e1, e2;
2204 /* specific case for TSS */
2205 if (env->eflags & NT_MASK) {
2206 #ifdef TARGET_X86_64
2207 if (env->hflags & HF_LMA_MASK) {
2208 raise_exception_err(env, EXCP0D_GPF, 0);
2209 }
2210 #endif
2211 tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
2212 if (tss_selector & 4) {
2213 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2214 }
2215 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
2216 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2217 }
2218 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2219 /* NOTE: we check both segment and busy TSS */
2220 if (type != 3) {
2221 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2222 }
2223 switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2224 } else {
2225 helper_ret_protected(env, shift, 1, 0);
2226 }
2227 env->hflags2 &= ~HF2_NMI_MASK;
2228 }
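/* far RET: same code path as IRET, but without the EFLAGS reload */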
2230 void helper_lret_protected(CPUX86State *env, int shift, int addend)
2231 {
2232 helper_ret_protected(env, shift, 0, addend);
2233 }
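/*
 * SYSENTER: enter the kernel through flat segments derived from the
 * IA32_SYSENTER_CS MSR (SS is CS + 8), with ESP/EIP taken from the
 * SYSENTER_ESP/SYSENTER_EIP MSRs.  VM, IF and RF are cleared and the
 * CPL becomes 0; a zero SYSENTER_CS raises #GP(0).
 *
 * A typical guest sequence is, schematically:
 *     user:   sysenter            ; jumps to SYSENTER_EIP at CPL 0
 *     kernel: ... ; sysexit       ; returns with ESP = ECX, EIP = EDX
 */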
2235 void helper_sysenter(CPUX86State *env)
2236 {
2237 if (env->sysenter_cs == 0) {
2238 raise_exception_err(env, EXCP0D_GPF, 0);
2239 }
2240 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2241 cpu_x86_set_cpl(env, 0);
2243 #ifdef TARGET_X86_64
2244 if (env->hflags & HF_LMA_MASK) {
2245 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2246 0, 0xffffffff,
2247 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2248 DESC_S_MASK |
2249 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2250 DESC_L_MASK);
2251 } else
2252 #endif
2253 {
2254 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2255 0, 0xffffffff,
2256 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2257 DESC_S_MASK |
2258 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2259 }
2260 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2261 0, 0xffffffff,
2262 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2263 DESC_S_MASK |
2264 DESC_W_MASK | DESC_A_MASK);
2265 env->regs[R_ESP] = env->sysenter_esp;
2266 env->eip = env->sysenter_eip;
2267 }
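/*
 * SYSEXIT: return to CPL 3.  The CS/SS selectors are derived from
 * SYSENTER_CS (+16/+24 for a 32-bit return, +32/+40 for a 64-bit one),
 * ESP is taken from ECX and EIP from EDX.  Executing it with a zero
 * SYSENTER_CS or from CPL > 0 raises #GP(0).
 */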
2269 void helper_sysexit(CPUX86State *env, int dflag)
2270 {
2271 int cpl;
2273 cpl = env->hflags & HF_CPL_MASK;
2274 if (env->sysenter_cs == 0 || cpl != 0) {
2275 raise_exception_err(env, EXCP0D_GPF, 0);
2276 }
2277 cpu_x86_set_cpl(env, 3);
2278 #ifdef TARGET_X86_64
2279 if (dflag == 2) {
2280 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2281 3, 0, 0xffffffff,
2282 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2283 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2284 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2285 DESC_L_MASK);
2286 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2287 3, 0, 0xffffffff,
2288 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2289 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2290 DESC_W_MASK | DESC_A_MASK);
2291 } else
2292 #endif
2293 {
2294 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2295 3, 0, 0xffffffff,
2296 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2297 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2298 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2299 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2300 3, 0, 0xffffffff,
2301 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2302 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2303 DESC_W_MASK | DESC_A_MASK);
2304 }
2305 env->regs[R_ESP] = env->regs[R_ECX];
2306 env->eip = env->regs[R_EDX];
2307 }
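/*
 * LSL: on success, return the expanded segment limit and set ZF (via
 * CC_SRC); on any failure, clear ZF and return 0.  Besides ordinary
 * segments, only TSS and LDT system descriptor types are accepted.
 */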
2309 target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
2310 {
2311 unsigned int limit;
2312 uint32_t e1, e2, eflags, selector;
2313 int rpl, dpl, cpl, type;
2315 selector = selector1 & 0xffff;
2316 eflags = cpu_cc_compute_all(env, CC_OP);
2317 if ((selector & 0xfffc) == 0) {
2318 goto fail;
2319 }
2320 if (load_segment(env, &e1, &e2, selector) != 0) {
2321 goto fail;
2322 }
2323 rpl = selector & 3;
2324 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2325 cpl = env->hflags & HF_CPL_MASK;
2326 if (e2 & DESC_S_MASK) {
2327 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2328 /* conforming */
2329 } else {
2330 if (dpl < cpl || dpl < rpl) {
2331 goto fail;
2332 }
2333 }
2334 } else {
2335 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2336 switch (type) {
2337 case 1:
2338 case 2:
2339 case 3:
2340 case 9:
2341 case 11:
2342 break;
2343 default:
2344 goto fail;
2345 }
2346 if (dpl < cpl || dpl < rpl) {
2347 fail:
2348 CC_SRC = eflags & ~CC_Z;
2349 return 0;
2350 }
2351 }
2352 limit = get_seg_limit(e1, e2);
2353 CC_SRC = eflags | CC_Z;
2354 return limit;
2355 }
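/*
 * LAR: like LSL but returns the access-rights bytes of the descriptor
 * (masked with 0x00f0ff00) and additionally accepts gate descriptors
 * (call gates and task gates); ZF reports success.
 */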
2357 target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
2358 {
2359 uint32_t e1, e2, eflags, selector;
2360 int rpl, dpl, cpl, type;
2362 selector = selector1 & 0xffff;
2363 eflags = cpu_cc_compute_all(env, CC_OP);
2364 if ((selector & 0xfffc) == 0) {
2365 goto fail;
2366 }
2367 if (load_segment(env, &e1, &e2, selector) != 0) {
2368 goto fail;
2369 }
2370 rpl = selector & 3;
2371 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2372 cpl = env->hflags & HF_CPL_MASK;
2373 if (e2 & DESC_S_MASK) {
2374 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2375 /* conforming */
2376 } else {
2377 if (dpl < cpl || dpl < rpl) {
2378 goto fail;
2379 }
2380 }
2381 } else {
2382 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2383 switch (type) {
2384 case 1:
2385 case 2:
2386 case 3:
2387 case 4:
2388 case 5:
2389 case 9:
2390 case 11:
2391 case 12:
2392 break;
2393 default:
2394 goto fail;
2395 }
2396 if (dpl < cpl || dpl < rpl) {
2397 fail:
2398 CC_SRC = eflags & ~CC_Z;
2399 return 0;
2400 }
2401 }
2402 CC_SRC = eflags | CC_Z;
2403 return e2 & 0x00f0ff00;
2404 }
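/*
 * VERR: set ZF if the selector refers to a code or data segment that
 * is readable at the current CPL/RPL (conforming code segments skip
 * the privilege check), otherwise clear ZF.
 */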
2406 void helper_verr(CPUX86State *env, target_ulong selector1)
2407 {
2408 uint32_t e1, e2, eflags, selector;
2409 int rpl, dpl, cpl;
2411 selector = selector1 & 0xffff;
2412 eflags = cpu_cc_compute_all(env, CC_OP);
2413 if ((selector & 0xfffc) == 0) {
2414 goto fail;
2415 }
2416 if (load_segment(env, &e1, &e2, selector) != 0) {
2417 goto fail;
2418 }
2419 if (!(e2 & DESC_S_MASK)) {
2420 goto fail;
2421 }
2422 rpl = selector & 3;
2423 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2424 cpl = env->hflags & HF_CPL_MASK;
2425 if (e2 & DESC_CS_MASK) {
2426 if (!(e2 & DESC_R_MASK)) {
2427 goto fail;
2428 }
2429 if (!(e2 & DESC_C_MASK)) {
2430 if (dpl < cpl || dpl < rpl) {
2431 goto fail;
2432 }
2433 }
2434 } else {
2435 if (dpl < cpl || dpl < rpl) {
2436 fail:
2437 CC_SRC = eflags & ~CC_Z;
2438 return;
2439 }
2440 }
2441 CC_SRC = eflags | CC_Z;
2442 }
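/*
 * VERW: set ZF if the selector refers to a writable data segment
 * accessible at the current CPL/RPL; code segments always fail.
 */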
2444 void helper_verw(CPUX86State *env, target_ulong selector1)
2445 {
2446 uint32_t e1, e2, eflags, selector;
2447 int rpl, dpl, cpl;
2449 selector = selector1 & 0xffff;
2450 eflags = cpu_cc_compute_all(env, CC_OP);
2451 if ((selector & 0xfffc) == 0) {
2452 goto fail;
2453 }
2454 if (load_segment(env, &e1, &e2, selector) != 0) {
2455 goto fail;
2456 }
2457 if (!(e2 & DESC_S_MASK)) {
2458 goto fail;
2459 }
2460 rpl = selector & 3;
2461 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2462 cpl = env->hflags & HF_CPL_MASK;
2463 if (e2 & DESC_CS_MASK) {
2464 goto fail;
2465 } else {
2466 if (dpl < cpl || dpl < rpl) {
2467 goto fail;
2468 }
2469 if (!(e2 & DESC_W_MASK)) {
2470 fail:
2471 CC_SRC = eflags & ~CC_Z;
2472 return;
2473 }
2474 }
2475 CC_SRC = eflags | CC_Z;
2476 }
2478 #if defined(CONFIG_USER_ONLY)
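/*
 * User-mode emulation only: load a segment register directly.  In
 * real or vm86 mode the base is simply selector << 4; otherwise the
 * full protected-mode checks in helper_load_seg() apply.
 */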
2479 void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
2480 {
2481 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
2482 selector &= 0xffff;
2483 cpu_x86_load_seg_cache(env, seg_reg, selector,
2484 (selector << 4), 0xffff, 0);
2485 } else {
2486 helper_load_seg(env, seg_reg, selector);
2487 }
2488 }
2489 #endif