x86: split off SVM helpers
[qemu.git] / target-i386 / op_helper.c
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "ioport.h"
#include "qemu-log.h"
#include "cpu-defs.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
151 /* XXX: merge with load_seg() */
152 static void tss_load_seg(int seg_reg, int selector)
154 uint32_t e1, e2;
155 int rpl, dpl, cpl;
157 if ((selector & 0xfffc) != 0) {
158 if (load_segment(&e1, &e2, selector) != 0) {
159 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
161 if (!(e2 & DESC_S_MASK)) {
162 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
164 rpl = selector & 3;
165 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
166 cpl = env->hflags & HF_CPL_MASK;
167 if (seg_reg == R_CS) {
168 if (!(e2 & DESC_CS_MASK)) {
169 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
171 /* XXX: is it correct? */
172 if (dpl != rpl) {
173 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
175 if ((e2 & DESC_C_MASK) && dpl > rpl) {
176 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
178 } else if (seg_reg == R_SS) {
179 /* SS must be writable data */
180 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
181 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
183 if (dpl != cpl || dpl != rpl) {
184 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
186 } else {
187 /* not readable code */
188 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
189 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
192             /* if data or non-conforming code, check the rights */
192 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
193 if (dpl < cpl || dpl < rpl) {
194 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
198 if (!(e2 & DESC_P_MASK)) {
199 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
201 cpu_x86_load_seg_cache(env, seg_reg, selector,
202 get_seg_base(e1, e2),
203 get_seg_limit(e1, e2),
204 e2);
205 } else {
206 if (seg_reg == R_SS || seg_reg == R_CS) {
207 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
212 #define SWITCH_TSS_JMP 0
213 #define SWITCH_TSS_IRET 1
214 #define SWITCH_TSS_CALL 2
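/*
 * switch_tss() below implements a hardware task switch: the outgoing
 * task's registers are written back to the current TSS, the incoming
 * TSS is read, and CR3, EIP, EFLAGS, the general registers and the
 * segment/LDT selectors are reloaded from it.  The three source values
 * above distinguish JMP, IRET and CALL semantics, which differ in how
 * the TSS busy bit, the NT flag and the back link are handled.
 */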
216 /* XXX: restore CPU state in registers (PowerPC case) */
217 static void switch_tss(int tss_selector,
218 uint32_t e1, uint32_t e2, int source,
219 uint32_t next_eip)
221 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
222 target_ulong tss_base;
223 uint32_t new_regs[8], new_segs[6];
224 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
225 uint32_t old_eflags, eflags_mask;
226 SegmentCache *dt;
227 int index;
228 target_ulong ptr;
230 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
231 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
232 source);
234 /* if task gate, we read the TSS segment and we load it */
235 if (type == 5) {
236 if (!(e2 & DESC_P_MASK)) {
237 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
239 tss_selector = e1 >> 16;
240 if (tss_selector & 4) {
241 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
243 if (load_segment(&e1, &e2, tss_selector) != 0) {
244 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
246 if (e2 & DESC_S_MASK) {
247 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
250 if ((type & 7) != 1) {
251 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
255 if (!(e2 & DESC_P_MASK)) {
256 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
259 if (type & 8) {
260 tss_limit_max = 103;
261 } else {
262 tss_limit_max = 43;
264 tss_limit = get_seg_limit(e1, e2);
265 tss_base = get_seg_base(e1, e2);
266 if ((tss_selector & 4) != 0 ||
267 tss_limit < tss_limit_max) {
268 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
270 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
271 if (old_type & 8) {
272 old_tss_limit_max = 103;
273 } else {
274 old_tss_limit_max = 43;
277 /* read all the registers from the new TSS */
278 if (type & 8) {
279 /* 32 bit */
280 new_cr3 = ldl_kernel(tss_base + 0x1c);
281 new_eip = ldl_kernel(tss_base + 0x20);
282 new_eflags = ldl_kernel(tss_base + 0x24);
283 for (i = 0; i < 8; i++) {
284 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
286 for (i = 0; i < 6; i++) {
287 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
289 new_ldt = lduw_kernel(tss_base + 0x60);
290 new_trap = ldl_kernel(tss_base + 0x64);
291 } else {
292 /* 16 bit */
293 new_cr3 = 0;
294 new_eip = lduw_kernel(tss_base + 0x0e);
295 new_eflags = lduw_kernel(tss_base + 0x10);
296 for (i = 0; i < 8; i++) {
297 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
299 for (i = 0; i < 4; i++) {
300 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
302 new_ldt = lduw_kernel(tss_base + 0x2a);
303 new_segs[R_FS] = 0;
304 new_segs[R_GS] = 0;
305 new_trap = 0;
307 /* XXX: avoid a compiler warning, see
308 http://support.amd.com/us/Processor_TechDocs/24593.pdf
309 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
310 (void)new_trap;
312 /* NOTE: we must avoid memory exceptions during the task switch,
313 so we make dummy accesses before */
314 /* XXX: it can still fail in some cases, so a bigger hack is
315        necessary to validate the TLB after having done the accesses */
317 v1 = ldub_kernel(env->tr.base);
318 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
319 stb_kernel(env->tr.base, v1);
320 stb_kernel(env->tr.base + old_tss_limit_max, v2);
322 /* clear busy bit (it is restartable) */
323 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
324 target_ulong ptr;
325 uint32_t e2;
327 ptr = env->gdt.base + (env->tr.selector & ~7);
328 e2 = ldl_kernel(ptr + 4);
329 e2 &= ~DESC_TSS_BUSY_MASK;
330 stl_kernel(ptr + 4, e2);
332 old_eflags = cpu_compute_eflags(env);
333 if (source == SWITCH_TSS_IRET) {
334 old_eflags &= ~NT_MASK;
337 /* save the current state in the old TSS */
338 if (type & 8) {
339 /* 32 bit */
340 stl_kernel(env->tr.base + 0x20, next_eip);
341 stl_kernel(env->tr.base + 0x24, old_eflags);
342 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
343 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
344 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
345 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
346 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
347 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
348 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
349 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
350 for (i = 0; i < 6; i++) {
351 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
353 } else {
354 /* 16 bit */
355 stw_kernel(env->tr.base + 0x0e, next_eip);
356 stw_kernel(env->tr.base + 0x10, old_eflags);
357 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
358 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
359 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
360 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
361 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
362 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
363 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
364 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
365 for (i = 0; i < 4; i++) {
366 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
370     /* now if an exception occurs, it will occur in the next task
371        context */
373 if (source == SWITCH_TSS_CALL) {
374 stw_kernel(tss_base, env->tr.selector);
375 new_eflags |= NT_MASK;
378 /* set busy bit */
379 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
380 target_ulong ptr;
381 uint32_t e2;
383 ptr = env->gdt.base + (tss_selector & ~7);
384 e2 = ldl_kernel(ptr + 4);
385 e2 |= DESC_TSS_BUSY_MASK;
386 stl_kernel(ptr + 4, e2);
389 /* set the new CPU state */
390     /* from this point, any exception which occurs can cause problems */
391 env->cr[0] |= CR0_TS_MASK;
392 env->hflags |= HF_TS_MASK;
393 env->tr.selector = tss_selector;
394 env->tr.base = tss_base;
395 env->tr.limit = tss_limit;
396 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
398 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
399 cpu_x86_update_cr3(env, new_cr3);
402 /* load all registers without an exception, then reload them with
403 possible exception */
404 env->eip = new_eip;
405 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
406 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
407 if (!(type & 8)) {
408 eflags_mask &= 0xffff;
410 cpu_load_eflags(env, new_eflags, eflags_mask);
411 /* XXX: what to do in 16 bit case? */
412 EAX = new_regs[0];
413 ECX = new_regs[1];
414 EDX = new_regs[2];
415 EBX = new_regs[3];
416 ESP = new_regs[4];
417 EBP = new_regs[5];
418 ESI = new_regs[6];
419 EDI = new_regs[7];
420 if (new_eflags & VM_MASK) {
421 for (i = 0; i < 6; i++) {
422 load_seg_vm(i, new_segs[i]);
424 /* in vm86, CPL is always 3 */
425 cpu_x86_set_cpl(env, 3);
426 } else {
427         /* CPL is set to the RPL of CS */
428 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
429 /* first just selectors as the rest may trigger exceptions */
430 for (i = 0; i < 6; i++) {
431 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
435 env->ldt.selector = new_ldt & ~4;
436 env->ldt.base = 0;
437 env->ldt.limit = 0;
438 env->ldt.flags = 0;
440 /* load the LDT */
441 if (new_ldt & 4) {
442 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
445 if ((new_ldt & 0xfffc) != 0) {
446 dt = &env->gdt;
447 index = new_ldt & ~7;
448 if ((index + 7) > dt->limit) {
449 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
451 ptr = dt->base + index;
452 e1 = ldl_kernel(ptr);
453 e2 = ldl_kernel(ptr + 4);
454 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
455 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
457 if (!(e2 & DESC_P_MASK)) {
458 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
460 load_seg_cache_raw_dt(&env->ldt, e1, e2);
463 /* load the segments */
464 if (!(new_eflags & VM_MASK)) {
465 tss_load_seg(R_CS, new_segs[R_CS]);
466 tss_load_seg(R_SS, new_segs[R_SS]);
467 tss_load_seg(R_ES, new_segs[R_ES]);
468 tss_load_seg(R_DS, new_segs[R_DS]);
469 tss_load_seg(R_FS, new_segs[R_FS]);
470 tss_load_seg(R_GS, new_segs[R_GS]);
473 /* check that EIP is in the CS segment limits */
474 if (new_eip > env->segs[R_CS].limit) {
475 /* XXX: different exception if CALL? */
476 raise_exception_err(env, EXCP0D_GPF, 0);
479 #ifndef CONFIG_USER_ONLY
480 /* reset local breakpoints */
481 if (env->dr[7] & 0x55) {
482 for (i = 0; i < 4; i++) {
483 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
484 hw_breakpoint_remove(env, i);
487 env->dr[7] &= ~0x55;
489 #endif
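/*
 * check_io() relies on the I/O permission bitmap of the 32-bit TSS:
 * the 16-bit word at TSS offset 0x66 holds the offset of the bitmap
 * inside the TSS, and each I/O port is represented by one bit.  All
 * bits covering the accessed port range must be clear for the access
 * to be allowed, otherwise #GP(0) is raised.
 */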
492 /* check if Port I/O is allowed in TSS */
493 static inline void check_io(int addr, int size)
495 int io_offset, val, mask;
497 /* TSS must be a valid 32 bit one */
498 if (!(env->tr.flags & DESC_P_MASK) ||
499 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
500 env->tr.limit < 103) {
501 goto fail;
503 io_offset = lduw_kernel(env->tr.base + 0x66);
504 io_offset += (addr >> 3);
505 /* Note: the check needs two bytes */
506 if ((io_offset + 1) > env->tr.limit) {
507 goto fail;
509 val = lduw_kernel(env->tr.base + io_offset);
510 val >>= (addr & 7);
511 mask = (1 << size) - 1;
512 /* all bits must be zero to allow the I/O */
513 if ((val & mask) != 0) {
514 fail:
515 raise_exception_err(env, EXCP0D_GPF, 0);
519 void helper_check_iob(uint32_t t0)
521 check_io(t0, 1);
524 void helper_check_iow(uint32_t t0)
526 check_io(t0, 2);
529 void helper_check_iol(uint32_t t0)
531 check_io(t0, 4);
534 void helper_outb(uint32_t port, uint32_t data)
536 cpu_outb(port, data & 0xff);
539 target_ulong helper_inb(uint32_t port)
541 return cpu_inb(port);
544 void helper_outw(uint32_t port, uint32_t data)
546 cpu_outw(port, data & 0xffff);
549 target_ulong helper_inw(uint32_t port)
551 return cpu_inw(port);
554 void helper_outl(uint32_t port, uint32_t data)
556 cpu_outl(port, data);
559 target_ulong helper_inl(uint32_t port)
561 return cpu_inl(port);
564 static inline unsigned int get_sp_mask(unsigned int e2)
566 if (e2 & DESC_B_MASK) {
567 return 0xffffffff;
568 } else {
569 return 0xffff;
573 static int exception_has_error_code(int intno)
575 switch (intno) {
576 case 8:
577 case 10:
578 case 11:
579 case 12:
580 case 13:
581 case 14:
582 case 17:
583 return 1;
585 return 0;
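/*
 * The vectors accepted above are #DF(8), #TS(10), #NP(11), #SS(12),
 * #GP(13), #PF(14) and #AC(17) -- the exceptions for which the x86
 * architecture pushes an error code on the stack.
 */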
588 #ifdef TARGET_X86_64
589 #define SET_ESP(val, sp_mask) \
590 do { \
591 if ((sp_mask) == 0xffff) { \
592 ESP = (ESP & ~0xffff) | ((val) & 0xffff); \
593 } else if ((sp_mask) == 0xffffffffLL) { \
594 ESP = (uint32_t)(val); \
595 } else { \
596 ESP = (val); \
598 } while (0)
599 #else
600 #define SET_ESP(val, sp_mask) \
601 do { \
602 ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
603 } while (0)
604 #endif
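/*
 * SET_ESP() above updates ESP according to sp_mask: with a 16-bit
 * stack only the low 16 bits of ESP are replaced, while wider masks
 * replace the full 32- or 64-bit value.  The PUSHW/PUSHL/POPW/POPL
 * macros below operate on a local copy of the stack pointer and do not
 * update ESP themselves; callers commit the final value with SET_ESP().
 */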
606 /* in 64-bit machines, this can overflow. So this segment addition macro
607 * can be used to trim the value to 32-bit whenever needed */
608 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
610 /* XXX: add a is_user flag to have proper security support */
611 #define PUSHW(ssp, sp, sp_mask, val) \
613 sp -= 2; \
614 stw_kernel((ssp) + (sp & (sp_mask)), (val)); \
617 #define PUSHL(ssp, sp, sp_mask, val) \
619 sp -= 4; \
620 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
623 #define POPW(ssp, sp, sp_mask, val) \
625 val = lduw_kernel((ssp) + (sp & (sp_mask))); \
626 sp += 2; \
629 #define POPL(ssp, sp, sp_mask, val) \
631 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
632 sp += 4; \
635 /* protected mode interrupt */
636 static void do_interrupt_protected(int intno, int is_int, int error_code,
637 unsigned int next_eip, int is_hw)
639 SegmentCache *dt;
640 target_ulong ptr, ssp;
641 int type, dpl, selector, ss_dpl, cpl;
642 int has_error_code, new_stack, shift;
643 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
644 uint32_t old_eip, sp_mask;
646 has_error_code = 0;
647 if (!is_int && !is_hw) {
648 has_error_code = exception_has_error_code(intno);
650 if (is_int) {
651 old_eip = next_eip;
652 } else {
653 old_eip = env->eip;
656 dt = &env->idt;
657 if (intno * 8 + 7 > dt->limit) {
658 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
660 ptr = dt->base + intno * 8;
661 e1 = ldl_kernel(ptr);
662 e2 = ldl_kernel(ptr + 4);
663 /* check gate type */
664 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
665 switch (type) {
666 case 5: /* task gate */
667 /* must do that check here to return the correct error code */
668 if (!(e2 & DESC_P_MASK)) {
669 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
671 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
672 if (has_error_code) {
673 int type;
674 uint32_t mask;
676 /* push the error code */
677 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
678 shift = type >> 3;
679 if (env->segs[R_SS].flags & DESC_B_MASK) {
680 mask = 0xffffffff;
681 } else {
682 mask = 0xffff;
684 esp = (ESP - (2 << shift)) & mask;
685 ssp = env->segs[R_SS].base + esp;
686 if (shift) {
687 stl_kernel(ssp, error_code);
688 } else {
689 stw_kernel(ssp, error_code);
691 SET_ESP(esp, mask);
693 return;
694 case 6: /* 286 interrupt gate */
695 case 7: /* 286 trap gate */
696 case 14: /* 386 interrupt gate */
697 case 15: /* 386 trap gate */
698 break;
699 default:
700 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
701 break;
703 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
704 cpl = env->hflags & HF_CPL_MASK;
705 /* check privilege if software int */
706 if (is_int && dpl < cpl) {
707 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
709 /* check valid bit */
710 if (!(e2 & DESC_P_MASK)) {
711 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
713 selector = e1 >> 16;
714 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
715 if ((selector & 0xfffc) == 0) {
716 raise_exception_err(env, EXCP0D_GPF, 0);
718 if (load_segment(&e1, &e2, selector) != 0) {
719 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
721 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
722 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
724 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
725 if (dpl > cpl) {
726 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
728 if (!(e2 & DESC_P_MASK)) {
729 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
731 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
732 /* to inner privilege */
733 get_ss_esp_from_tss(&ss, &esp, dpl);
734 if ((ss & 0xfffc) == 0) {
735 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
737 if ((ss & 3) != dpl) {
738 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
740 if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
741 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
743 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
744 if (ss_dpl != dpl) {
745 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
747 if (!(ss_e2 & DESC_S_MASK) ||
748 (ss_e2 & DESC_CS_MASK) ||
749 !(ss_e2 & DESC_W_MASK)) {
750 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
752 if (!(ss_e2 & DESC_P_MASK)) {
753 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
755 new_stack = 1;
756 sp_mask = get_sp_mask(ss_e2);
757 ssp = get_seg_base(ss_e1, ss_e2);
758 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
759 /* to same privilege */
760 if (env->eflags & VM_MASK) {
761 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
763 new_stack = 0;
764 sp_mask = get_sp_mask(env->segs[R_SS].flags);
765 ssp = env->segs[R_SS].base;
766 esp = ESP;
767 dpl = cpl;
768 } else {
769 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
770 new_stack = 0; /* avoid warning */
771 sp_mask = 0; /* avoid warning */
772 ssp = 0; /* avoid warning */
773 esp = 0; /* avoid warning */
776 shift = type >> 3;
778 #if 0
779 /* XXX: check that enough room is available */
780 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
781 if (env->eflags & VM_MASK) {
782 push_size += 8;
784 push_size <<= shift;
785 #endif
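/*
 * Interrupt stack frame, pushed below from highest to lowest address:
 * (VM86 only: GS, FS, DS, ES), then SS:ESP when switching to an inner
 * privilege level stack, then EFLAGS, CS, EIP, and finally the error
 * code when the exception defines one.
 */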
786 if (shift == 1) {
787 if (new_stack) {
788 if (env->eflags & VM_MASK) {
789 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
790 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
791 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
792 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
794 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
795 PUSHL(ssp, esp, sp_mask, ESP);
797 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
798 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
799 PUSHL(ssp, esp, sp_mask, old_eip);
800 if (has_error_code) {
801 PUSHL(ssp, esp, sp_mask, error_code);
803 } else {
804 if (new_stack) {
805 if (env->eflags & VM_MASK) {
806 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
807 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
808 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
809 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
811 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
812 PUSHW(ssp, esp, sp_mask, ESP);
814 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
815 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
816 PUSHW(ssp, esp, sp_mask, old_eip);
817 if (has_error_code) {
818 PUSHW(ssp, esp, sp_mask, error_code);
822 if (new_stack) {
823 if (env->eflags & VM_MASK) {
824 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
825 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
826 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
827 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
829 ss = (ss & ~3) | dpl;
830 cpu_x86_load_seg_cache(env, R_SS, ss,
831 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
833 SET_ESP(esp, sp_mask);
835 selector = (selector & ~3) | dpl;
836 cpu_x86_load_seg_cache(env, R_CS, selector,
837 get_seg_base(e1, e2),
838 get_seg_limit(e1, e2),
839 e2);
840 cpu_x86_set_cpl(env, dpl);
841 env->eip = offset;
843     /* interrupt gates clear the IF mask */
844 if ((type & 1) == 0) {
845 env->eflags &= ~IF_MASK;
847 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
850 #ifdef TARGET_X86_64
852 #define PUSHQ(sp, val) \
854 sp -= 8; \
855 stq_kernel(sp, (val)); \
858 #define POPQ(sp, val) \
860 val = ldq_kernel(sp); \
861 sp += 8; \
864 static inline target_ulong get_rsp_from_tss(int level)
866 int index;
868 #if 0
869 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
870 env->tr.base, env->tr.limit);
871 #endif
873 if (!(env->tr.flags & DESC_P_MASK)) {
874 cpu_abort(env, "invalid tss");
876 index = 8 * level + 4;
877 if ((index + 7) > env->tr.limit) {
878 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
880 return ldq_kernel(env->tr.base + index);
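/*
 * In the 64-bit TSS the stack pointers are stored at 8-byte
 * granularity: RSP0-RSP2 at offsets 0x04/0x0c/0x14 and IST1-IST7
 * starting at 0x24.  With the "8 * level + 4" indexing used above,
 * levels 0-2 select RSP0-RSP2 and level "ist + 3" selects ISTn, which
 * is how the callers below reach the Interrupt Stack Table entries.
 */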
883 /* 64 bit interrupt */
884 static void do_interrupt64(int intno, int is_int, int error_code,
885 target_ulong next_eip, int is_hw)
887 SegmentCache *dt;
888 target_ulong ptr;
889 int type, dpl, selector, cpl, ist;
890 int has_error_code, new_stack;
891 uint32_t e1, e2, e3, ss;
892 target_ulong old_eip, esp, offset;
894 has_error_code = 0;
895 if (!is_int && !is_hw) {
896 has_error_code = exception_has_error_code(intno);
898 if (is_int) {
899 old_eip = next_eip;
900 } else {
901 old_eip = env->eip;
904 dt = &env->idt;
905 if (intno * 16 + 15 > dt->limit) {
906 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
908 ptr = dt->base + intno * 16;
909 e1 = ldl_kernel(ptr);
910 e2 = ldl_kernel(ptr + 4);
911 e3 = ldl_kernel(ptr + 8);
912 /* check gate type */
913 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
914 switch (type) {
915 case 14: /* 386 interrupt gate */
916 case 15: /* 386 trap gate */
917 break;
918 default:
919 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
920 break;
922 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
923 cpl = env->hflags & HF_CPL_MASK;
924 /* check privilege if software int */
925 if (is_int && dpl < cpl) {
926 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
928 /* check valid bit */
929 if (!(e2 & DESC_P_MASK)) {
930 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
932 selector = e1 >> 16;
933 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
934 ist = e2 & 7;
935 if ((selector & 0xfffc) == 0) {
936 raise_exception_err(env, EXCP0D_GPF, 0);
939 if (load_segment(&e1, &e2, selector) != 0) {
940 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
942 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
943 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
946 if (dpl > cpl) {
947 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
949 if (!(e2 & DESC_P_MASK)) {
950 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
952 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
953 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
955 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
956 /* to inner privilege */
957 if (ist != 0) {
958 esp = get_rsp_from_tss(ist + 3);
959 } else {
960 esp = get_rsp_from_tss(dpl);
962 esp &= ~0xfLL; /* align stack */
963 ss = 0;
964 new_stack = 1;
965 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
966 /* to same privilege */
967 if (env->eflags & VM_MASK) {
968 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
970 new_stack = 0;
971 if (ist != 0) {
972 esp = get_rsp_from_tss(ist + 3);
973 } else {
974 esp = ESP;
976 esp &= ~0xfLL; /* align stack */
977 dpl = cpl;
978 } else {
979 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
980 new_stack = 0; /* avoid warning */
981 esp = 0; /* avoid warning */
984 PUSHQ(esp, env->segs[R_SS].selector);
985 PUSHQ(esp, ESP);
986 PUSHQ(esp, cpu_compute_eflags(env));
987 PUSHQ(esp, env->segs[R_CS].selector);
988 PUSHQ(esp, old_eip);
989 if (has_error_code) {
990 PUSHQ(esp, error_code);
993 if (new_stack) {
994 ss = 0 | dpl;
995 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
997 ESP = esp;
999 selector = (selector & ~3) | dpl;
1000 cpu_x86_load_seg_cache(env, R_CS, selector,
1001 get_seg_base(e1, e2),
1002 get_seg_limit(e1, e2),
1003 e2);
1004 cpu_x86_set_cpl(env, dpl);
1005 env->eip = offset;
1007     /* interrupt gates clear the IF mask */
1008 if ((type & 1) == 0) {
1009 env->eflags &= ~IF_MASK;
1011 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1013 #endif
1015 #ifdef TARGET_X86_64
1016 #if defined(CONFIG_USER_ONLY)
1017 void helper_syscall(int next_eip_addend)
1019 env->exception_index = EXCP_SYSCALL;
1020 env->exception_next_eip = env->eip + next_eip_addend;
1021 cpu_loop_exit(env);
1023 #else
1024 void helper_syscall(int next_eip_addend)
1026 int selector;
1028 if (!(env->efer & MSR_EFER_SCE)) {
1029 raise_exception_err(env, EXCP06_ILLOP, 0);
1031 selector = (env->star >> 32) & 0xffff;
1032 if (env->hflags & HF_LMA_MASK) {
1033 int code64;
1035 ECX = env->eip + next_eip_addend;
1036 env->regs[11] = cpu_compute_eflags(env);
1038 code64 = env->hflags & HF_CS64_MASK;
1040 cpu_x86_set_cpl(env, 0);
1041 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1042 0, 0xffffffff,
1043 DESC_G_MASK | DESC_P_MASK |
1044 DESC_S_MASK |
1045 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1046 DESC_L_MASK);
1047 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1048 0, 0xffffffff,
1049 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1050 DESC_S_MASK |
1051 DESC_W_MASK | DESC_A_MASK);
1052 env->eflags &= ~env->fmask;
1053 cpu_load_eflags(env, env->eflags, 0);
1054 if (code64) {
1055 env->eip = env->lstar;
1056 } else {
1057 env->eip = env->cstar;
1059 } else {
1060 ECX = (uint32_t)(env->eip + next_eip_addend);
1062 cpu_x86_set_cpl(env, 0);
1063 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1064 0, 0xffffffff,
1065 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066 DESC_S_MASK |
1067 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1068 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1069 0, 0xffffffff,
1070 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1071 DESC_S_MASK |
1072 DESC_W_MASK | DESC_A_MASK);
1073 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1074 env->eip = (uint32_t)env->star;
1077 #endif
1078 #endif
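/*
 * For SYSCALL/SYSRET the selectors are derived from the STAR MSR:
 * STAR[47:32] is the kernel CS used by SYSCALL (with SS = CS + 8) and
 * STAR[63:48] is the base selector used by SYSRET.  In long mode the
 * target RIP comes from LSTAR (64-bit callers) or CSTAR (compatibility
 * mode), and the flag bits set in SFMASK (env->fmask) are cleared from
 * RFLAGS on entry.
 */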
1080 #ifdef TARGET_X86_64
1081 void helper_sysret(int dflag)
1083 int cpl, selector;
1085 if (!(env->efer & MSR_EFER_SCE)) {
1086 raise_exception_err(env, EXCP06_ILLOP, 0);
1088 cpl = env->hflags & HF_CPL_MASK;
1089 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1090 raise_exception_err(env, EXCP0D_GPF, 0);
1092 selector = (env->star >> 48) & 0xffff;
1093 if (env->hflags & HF_LMA_MASK) {
1094 if (dflag == 2) {
1095 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1096 0, 0xffffffff,
1097 DESC_G_MASK | DESC_P_MASK |
1098 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1099 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1100 DESC_L_MASK);
1101 env->eip = ECX;
1102 } else {
1103 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1104 0, 0xffffffff,
1105 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1106 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1107 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1108 env->eip = (uint32_t)ECX;
1110 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1111 0, 0xffffffff,
1112 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1113 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1114 DESC_W_MASK | DESC_A_MASK);
1115 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1116 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1117 NT_MASK);
1118 cpu_x86_set_cpl(env, 3);
1119 } else {
1120 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1121 0, 0xffffffff,
1122 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1123 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1124 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1125 env->eip = (uint32_t)ECX;
1126 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1127 0, 0xffffffff,
1128 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1129 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1130 DESC_W_MASK | DESC_A_MASK);
1131 env->eflags |= IF_MASK;
1132 cpu_x86_set_cpl(env, 3);
1135 #endif
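/*
 * In real mode the IDT is a simple interrupt vector table: each entry
 * is 4 bytes (a 16-bit offset followed by a 16-bit segment), and
 * delivery just pushes FLAGS, CS and IP on the stack before jumping to
 * the handler; no privilege or descriptor checks apply.
 */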
1137 /* real mode interrupt */
1138 static void do_interrupt_real(int intno, int is_int, int error_code,
1139 unsigned int next_eip)
1141 SegmentCache *dt;
1142 target_ulong ptr, ssp;
1143 int selector;
1144 uint32_t offset, esp;
1145 uint32_t old_cs, old_eip;
1147 /* real mode (simpler!) */
1148 dt = &env->idt;
1149 if (intno * 4 + 3 > dt->limit) {
1150 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
1152 ptr = dt->base + intno * 4;
1153 offset = lduw_kernel(ptr);
1154 selector = lduw_kernel(ptr + 2);
1155 esp = ESP;
1156 ssp = env->segs[R_SS].base;
1157 if (is_int) {
1158 old_eip = next_eip;
1159 } else {
1160 old_eip = env->eip;
1162 old_cs = env->segs[R_CS].selector;
1163 /* XXX: use SS segment size? */
1164 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1165 PUSHW(ssp, esp, 0xffff, old_cs);
1166 PUSHW(ssp, esp, 0xffff, old_eip);
1168 /* update processor state */
1169 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1170 env->eip = offset;
1171 env->segs[R_CS].selector = selector;
1172 env->segs[R_CS].base = (selector << 4);
1173 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1176 #if defined(CONFIG_USER_ONLY)
1177 /* fake user mode interrupt */
1178 static void do_interrupt_user(int intno, int is_int, int error_code,
1179 target_ulong next_eip)
1181 SegmentCache *dt;
1182 target_ulong ptr;
1183 int dpl, cpl, shift;
1184 uint32_t e2;
1186 dt = &env->idt;
1187 if (env->hflags & HF_LMA_MASK) {
1188 shift = 4;
1189 } else {
1190 shift = 3;
1192 ptr = dt->base + (intno << shift);
1193 e2 = ldl_kernel(ptr + 4);
1195 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1196 cpl = env->hflags & HF_CPL_MASK;
1197 /* check privilege if software int */
1198 if (is_int && dpl < cpl) {
1199 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1202 /* Since we emulate only user space, we cannot do more than
1203        exit the emulation with a suitable exception and error
1204 code */
1205 if (is_int) {
1206 EIP = next_eip;
1210 #else
1212 static void handle_even_inj(int intno, int is_int, int error_code,
1213 int is_hw, int rm)
1215 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
1216 control.event_inj));
1218 if (!(event_inj & SVM_EVTINJ_VALID)) {
1219 int type;
1221 if (is_int) {
1222 type = SVM_EVTINJ_TYPE_SOFT;
1223 } else {
1224 type = SVM_EVTINJ_TYPE_EXEPT;
1226 event_inj = intno | type | SVM_EVTINJ_VALID;
1227 if (!rm && exception_has_error_code(intno)) {
1228 event_inj |= SVM_EVTINJ_VALID_ERR;
1229 stl_phys(env->vm_vmcb + offsetof(struct vmcb,
1230 control.event_inj_err),
1231 error_code);
1233 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1234 event_inj);
1237 #endif
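/*
 * When running under SVM (HF_SVMI_MASK), handle_even_inj() above
 * records the interrupt or exception being delivered in the event_inj
 * field of the VMCB control area (unless an injected event is already
 * marked valid), together with its error code when the vector defines
 * one; do_interrupt_all() clears the valid bit again once delivery has
 * completed.
 */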
1240  * Begin execution of an interrupt. is_int is TRUE if coming from
1241 * the int instruction. next_eip is the EIP value AFTER the interrupt
1242 * instruction. It is only relevant if is_int is TRUE.
1244 static void do_interrupt_all(int intno, int is_int, int error_code,
1245 target_ulong next_eip, int is_hw)
1247 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1248 if ((env->cr[0] & CR0_PE_MASK)) {
1249 static int count;
1251 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1252 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1253 count, intno, error_code, is_int,
1254 env->hflags & HF_CPL_MASK,
1255 env->segs[R_CS].selector, EIP,
1256 (int)env->segs[R_CS].base + EIP,
1257 env->segs[R_SS].selector, ESP);
1258 if (intno == 0x0e) {
1259 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1260 } else {
1261 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1263 qemu_log("\n");
1264 log_cpu_state(env, X86_DUMP_CCOP);
1265 #if 0
1267 int i;
1268 target_ulong ptr;
1270 qemu_log(" code=");
1271 ptr = env->segs[R_CS].base + env->eip;
1272 for (i = 0; i < 16; i++) {
1273 qemu_log(" %02x", ldub(ptr + i));
1275 qemu_log("\n");
1277 #endif
1278 count++;
1281 if (env->cr[0] & CR0_PE_MASK) {
1282 #if !defined(CONFIG_USER_ONLY)
1283 if (env->hflags & HF_SVMI_MASK) {
1284 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1286 #endif
1287 #ifdef TARGET_X86_64
1288 if (env->hflags & HF_LMA_MASK) {
1289 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1290 } else
1291 #endif
1293 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1295 } else {
1296 #if !defined(CONFIG_USER_ONLY)
1297 if (env->hflags & HF_SVMI_MASK) {
1298 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1300 #endif
1301 do_interrupt_real(intno, is_int, error_code, next_eip);
1304 #if !defined(CONFIG_USER_ONLY)
1305 if (env->hflags & HF_SVMI_MASK) {
1306 uint32_t event_inj = ldl_phys(env->vm_vmcb +
1307 offsetof(struct vmcb,
1308 control.event_inj));
1310 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1311 event_inj & ~SVM_EVTINJ_VALID);
1313 #endif
1316 void do_interrupt(CPUX86State *env1)
1318 CPUX86State *saved_env;
1320 saved_env = env;
1321 env = env1;
1322 #if defined(CONFIG_USER_ONLY)
1323 /* if user mode only, we simulate a fake exception
1324 which will be handled outside the cpu execution
1325 loop */
1326 do_interrupt_user(env->exception_index,
1327 env->exception_is_int,
1328 env->error_code,
1329 env->exception_next_eip);
1330 /* successfully delivered */
1331 env->old_exception = -1;
1332 #else
1333 /* simulate a real cpu exception. On i386, it can
1334 trigger new exceptions, but we do not handle
1335 double or triple faults yet. */
1336 do_interrupt_all(env->exception_index,
1337 env->exception_is_int,
1338 env->error_code,
1339 env->exception_next_eip, 0);
1340 /* successfully delivered */
1341 env->old_exception = -1;
1342 #endif
1343 env = saved_env;
1346 void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
1348 CPUX86State *saved_env;
1350 saved_env = env;
1351 env = env1;
1352 do_interrupt_all(intno, 0, 0, 0, is_hw);
1353 env = saved_env;
1356 /* SMM support */
1358 #if defined(CONFIG_USER_ONLY)
1360 void do_smm_enter(CPUX86State *env1)
1364 void helper_rsm(void)
1368 #else
1370 #ifdef TARGET_X86_64
1371 #define SMM_REVISION_ID 0x00020064
1372 #else
1373 #define SMM_REVISION_ID 0x00020000
1374 #endif
1376 void do_smm_enter(CPUX86State *env1)
1378 target_ulong sm_state;
1379 SegmentCache *dt;
1380 int i, offset;
1381 CPUX86State *saved_env;
1383 saved_env = env;
1384 env = env1;
1386 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1387 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1389 env->hflags |= HF_SMM_MASK;
1390 cpu_smm_update(env);
1392 sm_state = env->smbase + 0x8000;
1394 #ifdef TARGET_X86_64
1395 for (i = 0; i < 6; i++) {
1396 dt = &env->segs[i];
1397 offset = 0x7e00 + i * 16;
1398 stw_phys(sm_state + offset, dt->selector);
1399 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1400 stl_phys(sm_state + offset + 4, dt->limit);
1401 stq_phys(sm_state + offset + 8, dt->base);
1404 stq_phys(sm_state + 0x7e68, env->gdt.base);
1405 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1407 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1408 stq_phys(sm_state + 0x7e78, env->ldt.base);
1409 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1410 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1412 stq_phys(sm_state + 0x7e88, env->idt.base);
1413 stl_phys(sm_state + 0x7e84, env->idt.limit);
1415 stw_phys(sm_state + 0x7e90, env->tr.selector);
1416 stq_phys(sm_state + 0x7e98, env->tr.base);
1417 stl_phys(sm_state + 0x7e94, env->tr.limit);
1418 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1420 stq_phys(sm_state + 0x7ed0, env->efer);
1422 stq_phys(sm_state + 0x7ff8, EAX);
1423 stq_phys(sm_state + 0x7ff0, ECX);
1424 stq_phys(sm_state + 0x7fe8, EDX);
1425 stq_phys(sm_state + 0x7fe0, EBX);
1426 stq_phys(sm_state + 0x7fd8, ESP);
1427 stq_phys(sm_state + 0x7fd0, EBP);
1428 stq_phys(sm_state + 0x7fc8, ESI);
1429 stq_phys(sm_state + 0x7fc0, EDI);
1430 for (i = 8; i < 16; i++) {
1431 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1433 stq_phys(sm_state + 0x7f78, env->eip);
1434 stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
1435 stl_phys(sm_state + 0x7f68, env->dr[6]);
1436 stl_phys(sm_state + 0x7f60, env->dr[7]);
1438 stl_phys(sm_state + 0x7f48, env->cr[4]);
1439 stl_phys(sm_state + 0x7f50, env->cr[3]);
1440 stl_phys(sm_state + 0x7f58, env->cr[0]);
1442 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1443 stl_phys(sm_state + 0x7f00, env->smbase);
1444 #else
1445 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1446 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1447 stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
1448 stl_phys(sm_state + 0x7ff0, env->eip);
1449 stl_phys(sm_state + 0x7fec, EDI);
1450 stl_phys(sm_state + 0x7fe8, ESI);
1451 stl_phys(sm_state + 0x7fe4, EBP);
1452 stl_phys(sm_state + 0x7fe0, ESP);
1453 stl_phys(sm_state + 0x7fdc, EBX);
1454 stl_phys(sm_state + 0x7fd8, EDX);
1455 stl_phys(sm_state + 0x7fd4, ECX);
1456 stl_phys(sm_state + 0x7fd0, EAX);
1457 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1458 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1460 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1461 stl_phys(sm_state + 0x7f64, env->tr.base);
1462 stl_phys(sm_state + 0x7f60, env->tr.limit);
1463 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1465 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1466 stl_phys(sm_state + 0x7f80, env->ldt.base);
1467 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1468 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1470 stl_phys(sm_state + 0x7f74, env->gdt.base);
1471 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1473 stl_phys(sm_state + 0x7f58, env->idt.base);
1474 stl_phys(sm_state + 0x7f54, env->idt.limit);
1476 for (i = 0; i < 6; i++) {
1477 dt = &env->segs[i];
1478 if (i < 3) {
1479 offset = 0x7f84 + i * 12;
1480 } else {
1481 offset = 0x7f2c + (i - 3) * 12;
1483 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1484 stl_phys(sm_state + offset + 8, dt->base);
1485 stl_phys(sm_state + offset + 4, dt->limit);
1486 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1488 stl_phys(sm_state + 0x7f14, env->cr[4]);
1490 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1491 stl_phys(sm_state + 0x7ef8, env->smbase);
1492 #endif
1493 /* init SMM cpu state */
1495 #ifdef TARGET_X86_64
1496 cpu_load_efer(env, 0);
1497 #endif
1498 cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
1499 DF_MASK));
1500 env->eip = 0x00008000;
1501 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1502 0xffffffff, 0);
1503 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1504 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1505 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1506 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1507 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1509 cpu_x86_update_cr0(env,
1510 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
1511 CR0_PG_MASK));
1512 cpu_x86_update_cr4(env, 0);
1513 env->dr[7] = 0x00000400;
1514 CC_OP = CC_OP_EFLAGS;
1515 env = saved_env;
1518 void helper_rsm(void)
1520 target_ulong sm_state;
1521 int i, offset;
1522 uint32_t val;
1524 sm_state = env->smbase + 0x8000;
1525 #ifdef TARGET_X86_64
1526 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1528 for (i = 0; i < 6; i++) {
1529 offset = 0x7e00 + i * 16;
1530 cpu_x86_load_seg_cache(env, i,
1531 lduw_phys(sm_state + offset),
1532 ldq_phys(sm_state + offset + 8),
1533 ldl_phys(sm_state + offset + 4),
1534 (lduw_phys(sm_state + offset + 2) &
1535 0xf0ff) << 8);
1538 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1539 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1541 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1542 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1543 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1544 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1546 env->idt.base = ldq_phys(sm_state + 0x7e88);
1547 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1549 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1550 env->tr.base = ldq_phys(sm_state + 0x7e98);
1551 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1552 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1554 EAX = ldq_phys(sm_state + 0x7ff8);
1555 ECX = ldq_phys(sm_state + 0x7ff0);
1556 EDX = ldq_phys(sm_state + 0x7fe8);
1557 EBX = ldq_phys(sm_state + 0x7fe0);
1558 ESP = ldq_phys(sm_state + 0x7fd8);
1559 EBP = ldq_phys(sm_state + 0x7fd0);
1560 ESI = ldq_phys(sm_state + 0x7fc8);
1561 EDI = ldq_phys(sm_state + 0x7fc0);
1562 for (i = 8; i < 16; i++) {
1563 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1565 env->eip = ldq_phys(sm_state + 0x7f78);
1566 cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
1567 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1568 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1569 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1571 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1572 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1573 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1575 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1576 if (val & 0x20000) {
1577 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1579 #else
1580 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1581 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1582 cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
1583 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1584 env->eip = ldl_phys(sm_state + 0x7ff0);
1585 EDI = ldl_phys(sm_state + 0x7fec);
1586 ESI = ldl_phys(sm_state + 0x7fe8);
1587 EBP = ldl_phys(sm_state + 0x7fe4);
1588 ESP = ldl_phys(sm_state + 0x7fe0);
1589 EBX = ldl_phys(sm_state + 0x7fdc);
1590 EDX = ldl_phys(sm_state + 0x7fd8);
1591 ECX = ldl_phys(sm_state + 0x7fd4);
1592 EAX = ldl_phys(sm_state + 0x7fd0);
1593 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1594 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1596 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1597 env->tr.base = ldl_phys(sm_state + 0x7f64);
1598 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1599 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1601 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1602 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1603 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1604 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1606 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1607 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1609 env->idt.base = ldl_phys(sm_state + 0x7f58);
1610 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1612 for (i = 0; i < 6; i++) {
1613 if (i < 3) {
1614 offset = 0x7f84 + i * 12;
1615 } else {
1616 offset = 0x7f2c + (i - 3) * 12;
1618 cpu_x86_load_seg_cache(env, i,
1619 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1620 ldl_phys(sm_state + offset + 8),
1621 ldl_phys(sm_state + offset + 4),
1622 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1624 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1626 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1627 if (val & 0x20000) {
1628 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1630 #endif
1631 CC_OP = CC_OP_EFLAGS;
1632 env->hflags &= ~HF_SMM_MASK;
1633 cpu_smm_update(env);
1635 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1636 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1639 #endif /* !CONFIG_USER_ONLY */
1641 void helper_into(int next_eip_addend)
1643 int eflags;
1645 eflags = helper_cc_compute_all(CC_OP);
1646 if (eflags & CC_O) {
1647 raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
1651 void helper_cmpxchg8b(target_ulong a0)
1653 uint64_t d;
1654 int eflags;
1656 eflags = helper_cc_compute_all(CC_OP);
1657 d = ldq(a0);
1658 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1659 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1660 eflags |= CC_Z;
1661 } else {
1662 /* always do the store */
1663 stq(a0, d);
1664 EDX = (uint32_t)(d >> 32);
1665 EAX = (uint32_t)d;
1666 eflags &= ~CC_Z;
1668 CC_SRC = eflags;
1671 #ifdef TARGET_X86_64
1672 void helper_cmpxchg16b(target_ulong a0)
1674 uint64_t d0, d1;
1675 int eflags;
1677 if ((a0 & 0xf) != 0) {
1678 raise_exception(env, EXCP0D_GPF);
1680 eflags = helper_cc_compute_all(CC_OP);
1681 d0 = ldq(a0);
1682 d1 = ldq(a0 + 8);
1683 if (d0 == EAX && d1 == EDX) {
1684 stq(a0, EBX);
1685 stq(a0 + 8, ECX);
1686 eflags |= CC_Z;
1687 } else {
1688 /* always do the store */
1689 stq(a0, d0);
1690 stq(a0 + 8, d1);
1691 EDX = d1;
1692 EAX = d0;
1693 eflags &= ~CC_Z;
1695 CC_SRC = eflags;
1697 #endif
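/*
 * Note that, as on real hardware, helper_cmpxchg8b()/helper_cmpxchg16b()
 * perform the memory store even when the comparison fails (the old
 * value is written back), so a write fault is taken regardless of the
 * comparison result.
 */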
1699 void helper_single_step(void)
1701 #ifndef CONFIG_USER_ONLY
1702 check_hw_breakpoints(env, 1);
1703 env->dr[6] |= DR6_BS;
1704 #endif
1705 raise_exception(env, EXCP01_DB);
1708 void helper_cpuid(void)
1710 uint32_t eax, ebx, ecx, edx;
1712 cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
1714 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1715 EAX = eax;
1716 EBX = ebx;
1717 ECX = ecx;
1718 EDX = edx;
1721 void helper_enter_level(int level, int data32, target_ulong t1)
1723 target_ulong ssp;
1724 uint32_t esp_mask, esp, ebp;
1726 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1727 ssp = env->segs[R_SS].base;
1728 ebp = EBP;
1729 esp = ESP;
1730 if (data32) {
1731 /* 32 bit */
1732 esp -= 4;
1733 while (--level) {
1734 esp -= 4;
1735 ebp -= 4;
1736 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1738 esp -= 4;
1739 stl(ssp + (esp & esp_mask), t1);
1740 } else {
1741 /* 16 bit */
1742 esp -= 2;
1743 while (--level) {
1744 esp -= 2;
1745 ebp -= 2;
1746 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1748 esp -= 2;
1749 stw(ssp + (esp & esp_mask), t1);
1753 #ifdef TARGET_X86_64
1754 void helper_enter64_level(int level, int data64, target_ulong t1)
1756 target_ulong esp, ebp;
1758 ebp = EBP;
1759 esp = ESP;
1761 if (data64) {
1762 /* 64 bit */
1763 esp -= 8;
1764 while (--level) {
1765 esp -= 8;
1766 ebp -= 8;
1767 stq(esp, ldq(ebp));
1769 esp -= 8;
1770 stq(esp, t1);
1771 } else {
1772 /* 16 bit */
1773 esp -= 2;
1774 while (--level) {
1775 esp -= 2;
1776 ebp -= 2;
1777 stw(esp, lduw(ebp));
1779 esp -= 2;
1780 stw(esp, t1);
1783 #endif
1785 void helper_lldt(int selector)
1787 SegmentCache *dt;
1788 uint32_t e1, e2;
1789 int index, entry_limit;
1790 target_ulong ptr;
1792 selector &= 0xffff;
1793 if ((selector & 0xfffc) == 0) {
1794 /* XXX: NULL selector case: invalid LDT */
1795 env->ldt.base = 0;
1796 env->ldt.limit = 0;
1797 } else {
1798 if (selector & 0x4) {
1799 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1801 dt = &env->gdt;
1802 index = selector & ~7;
1803 #ifdef TARGET_X86_64
1804 if (env->hflags & HF_LMA_MASK) {
1805 entry_limit = 15;
1806 } else
1807 #endif
1809 entry_limit = 7;
1811 if ((index + entry_limit) > dt->limit) {
1812 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1814 ptr = dt->base + index;
1815 e1 = ldl_kernel(ptr);
1816 e2 = ldl_kernel(ptr + 4);
1817 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
1818 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1820 if (!(e2 & DESC_P_MASK)) {
1821 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1823 #ifdef TARGET_X86_64
1824 if (env->hflags & HF_LMA_MASK) {
1825 uint32_t e3;
1827 e3 = ldl_kernel(ptr + 8);
1828 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1829 env->ldt.base |= (target_ulong)e3 << 32;
1830 } else
1831 #endif
1833 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1836 env->ldt.selector = selector;
1839 void helper_ltr(int selector)
1841 SegmentCache *dt;
1842 uint32_t e1, e2;
1843 int index, type, entry_limit;
1844 target_ulong ptr;
1846 selector &= 0xffff;
1847 if ((selector & 0xfffc) == 0) {
1848 /* NULL selector case: invalid TR */
1849 env->tr.base = 0;
1850 env->tr.limit = 0;
1851 env->tr.flags = 0;
1852 } else {
1853 if (selector & 0x4) {
1854 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1856 dt = &env->gdt;
1857 index = selector & ~7;
1858 #ifdef TARGET_X86_64
1859 if (env->hflags & HF_LMA_MASK) {
1860 entry_limit = 15;
1861 } else
1862 #endif
1864 entry_limit = 7;
1866 if ((index + entry_limit) > dt->limit) {
1867 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1869 ptr = dt->base + index;
1870 e1 = ldl_kernel(ptr);
1871 e2 = ldl_kernel(ptr + 4);
1872 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1873 if ((e2 & DESC_S_MASK) ||
1874 (type != 1 && type != 9)) {
1875 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1877 if (!(e2 & DESC_P_MASK)) {
1878 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1880 #ifdef TARGET_X86_64
1881 if (env->hflags & HF_LMA_MASK) {
1882 uint32_t e3, e4;
1884 e3 = ldl_kernel(ptr + 8);
1885 e4 = ldl_kernel(ptr + 12);
1886 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
1887 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1889 load_seg_cache_raw_dt(&env->tr, e1, e2);
1890 env->tr.base |= (target_ulong)e3 << 32;
1891 } else
1892 #endif
1894 load_seg_cache_raw_dt(&env->tr, e1, e2);
1896 e2 |= DESC_TSS_BUSY_MASK;
1897 stl_kernel(ptr + 4, e2);
1899 env->tr.selector = selector;
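/*
 * Loading the task register marks the descriptor as busy: the
 * stl_kernel() above writes the descriptor back with
 * DESC_TSS_BUSY_MASK set, turning an available TSS (type 1 or 9) into
 * a busy one, exactly as LTR does on real hardware.
 */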
1902 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1903 void helper_load_seg(int seg_reg, int selector)
1905 uint32_t e1, e2;
1906 int cpl, dpl, rpl;
1907 SegmentCache *dt;
1908 int index;
1909 target_ulong ptr;
1911 selector &= 0xffff;
1912 cpl = env->hflags & HF_CPL_MASK;
1913 if ((selector & 0xfffc) == 0) {
1914 /* null selector case */
1915 if (seg_reg == R_SS
1916 #ifdef TARGET_X86_64
1917 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1918 #endif
1920 raise_exception_err(env, EXCP0D_GPF, 0);
1922 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1923 } else {
1925 if (selector & 0x4) {
1926 dt = &env->ldt;
1927 } else {
1928 dt = &env->gdt;
1930 index = selector & ~7;
1931 if ((index + 7) > dt->limit) {
1932 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1934 ptr = dt->base + index;
1935 e1 = ldl_kernel(ptr);
1936 e2 = ldl_kernel(ptr + 4);
1938 if (!(e2 & DESC_S_MASK)) {
1939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1941 rpl = selector & 3;
1942 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1943 if (seg_reg == R_SS) {
1944 /* must be writable segment */
1945 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
1946 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1948 if (rpl != cpl || dpl != cpl) {
1949 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1951 } else {
1952 /* must be readable segment */
1953 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1954 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1957 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1958 /* if not conforming code, test rights */
1959 if (dpl < cpl || dpl < rpl) {
1960 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
1965 if (!(e2 & DESC_P_MASK)) {
1966 if (seg_reg == R_SS) {
1967 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
1968 } else {
1969 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
1973 /* set the access bit if not already set */
1974 if (!(e2 & DESC_A_MASK)) {
1975 e2 |= DESC_A_MASK;
1976 stl_kernel(ptr + 4, e2);
1979 cpu_x86_load_seg_cache(env, seg_reg, selector,
1980 get_seg_base(e1, e2),
1981 get_seg_limit(e1, e2),
1982 e2);
1983 #if 0
1984 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1985 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1986 #endif
1990 /* protected mode jump */
1991 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
1992 int next_eip_addend)
1994 int gate_cs, type;
1995 uint32_t e1, e2, cpl, dpl, rpl, limit;
1996 target_ulong next_eip;
1998 if ((new_cs & 0xfffc) == 0) {
1999 raise_exception_err(env, EXCP0D_GPF, 0);
2001 if (load_segment(&e1, &e2, new_cs) != 0) {
2002 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2004 cpl = env->hflags & HF_CPL_MASK;
2005 if (e2 & DESC_S_MASK) {
2006 if (!(e2 & DESC_CS_MASK)) {
2007 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2009 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2010 if (e2 & DESC_C_MASK) {
2011 /* conforming code segment */
2012 if (dpl > cpl) {
2013 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2015 } else {
2016 /* non conforming code segment */
2017 rpl = new_cs & 3;
2018 if (rpl > cpl) {
2019 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2021 if (dpl != cpl) {
2022 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2025 if (!(e2 & DESC_P_MASK)) {
2026 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2028 limit = get_seg_limit(e1, e2);
2029 if (new_eip > limit &&
2030 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
2031 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2033 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2034 get_seg_base(e1, e2), limit, e2);
2035 EIP = new_eip;
2036 } else {
2037 /* jump to call or task gate */
2038 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2039 rpl = new_cs & 3;
2040 cpl = env->hflags & HF_CPL_MASK;
2041 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2042 switch (type) {
2043 case 1: /* 286 TSS */
2044 case 9: /* 386 TSS */
2045 case 5: /* task gate */
2046 if (dpl < cpl || dpl < rpl) {
2047 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2049 next_eip = env->eip + next_eip_addend;
2050 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2051 CC_OP = CC_OP_EFLAGS;
2052 break;
2053 case 4: /* 286 call gate */
2054 case 12: /* 386 call gate */
2055 if ((dpl < cpl) || (dpl < rpl)) {
2056 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2058 if (!(e2 & DESC_P_MASK)) {
2059 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2061 gate_cs = e1 >> 16;
2062 new_eip = (e1 & 0xffff);
2063 if (type == 12) {
2064 new_eip |= (e2 & 0xffff0000);
2066 if (load_segment(&e1, &e2, gate_cs) != 0) {
2067 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2069 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2070 /* must be code segment */
2071 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2072 (DESC_S_MASK | DESC_CS_MASK))) {
2073 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2075 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2076 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
2077 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2079 if (!(e2 & DESC_P_MASK)) {
2080 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
2082 limit = get_seg_limit(e1, e2);
2083 if (new_eip > limit) {
2084 raise_exception_err(env, EXCP0D_GPF, 0);
2086 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2087 get_seg_base(e1, e2), limit, e2);
2088 EIP = new_eip;
2089 break;
2090 default:
2091 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2092 break;
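/* Minimal sketch of the call-gate decoding used in the gate path above:
   the target selector is the high half of e1, the offset is the low half
   of e1 plus, for a 386 gate (type 12), the high half of e2.  The helper
   name below is hypothetical and the block is kept under #if 0 like the
   other debug snippets in this file. */
#if 0
static void log_call_gate(uint32_t e1, uint32_t e2, int type)
{
    int gate_cs = e1 >> 16;
    uint32_t offset = e1 & 0xffff;

    if (type == 12) {                 /* 386 call gate */
        offset |= e2 & 0xffff0000;
    }
    qemu_log("call gate -> %04x:%08x\n", gate_cs, offset);
}
#endif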
2097 /* real mode call */
2098 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2099 int shift, int next_eip)
2101 int new_eip;
2102 uint32_t esp, esp_mask;
2103 target_ulong ssp;
2105 new_eip = new_eip1;
2106 esp = ESP;
2107 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2108 ssp = env->segs[R_SS].base;
2109 if (shift) {
2110 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2111 PUSHL(ssp, esp, esp_mask, next_eip);
2112 } else {
2113 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2114 PUSHW(ssp, esp, esp_mask, next_eip);
2117 SET_ESP(esp, esp_mask);
2118 env->eip = new_eip;
2119 env->segs[R_CS].selector = new_cs;
2120 env->segs[R_CS].base = (new_cs << 4);
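/* As in every real mode segment load, the new CS base is simply the
   selector shifted left by 4: e.g. new_cs = 0x1234 gives a base of
   0x12340. */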
2123 /* protected mode call */
2124 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2125 int shift, int next_eip_addend)
2127 int new_stack, i;
2128 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2129 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2130 uint32_t val, limit, old_sp_mask;
2131 target_ulong ssp, old_ssp, next_eip;
2133 next_eip = env->eip + next_eip_addend;
2134 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2135 LOG_PCALL_STATE(env);
2136 if ((new_cs & 0xfffc) == 0) {
2137 raise_exception_err(env, EXCP0D_GPF, 0);
2139 if (load_segment(&e1, &e2, new_cs) != 0) {
2140 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2142 cpl = env->hflags & HF_CPL_MASK;
2143 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2144 if (e2 & DESC_S_MASK) {
2145 if (!(e2 & DESC_CS_MASK)) {
2146 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2148 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2149 if (e2 & DESC_C_MASK) {
2150 /* conforming code segment */
2151 if (dpl > cpl) {
2152 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2154 } else {
2155 /* non conforming code segment */
2156 rpl = new_cs & 3;
2157 if (rpl > cpl) {
2158 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2160 if (dpl != cpl) {
2161 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2164 if (!(e2 & DESC_P_MASK)) {
2165 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2168 #ifdef TARGET_X86_64
2169 /* XXX: check 16/32 bit cases in long mode */
2170 if (shift == 2) {
2171 target_ulong rsp;
2173 /* 64 bit case */
2174 rsp = ESP;
2175 PUSHQ(rsp, env->segs[R_CS].selector);
2176 PUSHQ(rsp, next_eip);
2177 /* from this point, not restartable */
2178 ESP = rsp;
2179 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2180 get_seg_base(e1, e2),
2181 get_seg_limit(e1, e2), e2);
2182 EIP = new_eip;
2183 } else
2184 #endif
2186 sp = ESP;
2187 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2188 ssp = env->segs[R_SS].base;
2189 if (shift) {
2190 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2191 PUSHL(ssp, sp, sp_mask, next_eip);
2192 } else {
2193 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2194 PUSHW(ssp, sp, sp_mask, next_eip);
2197 limit = get_seg_limit(e1, e2);
2198 if (new_eip > limit) {
2199 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2201 /* from this point, not restartable */
2202 SET_ESP(sp, sp_mask);
2203 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2204 get_seg_base(e1, e2), limit, e2);
2205 EIP = new_eip;
2207 } else {
2208 /* check gate type */
2209 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2210 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2211 rpl = new_cs & 3;
2212 switch (type) {
2213 case 1: /* available 286 TSS */
2214 case 9: /* available 386 TSS */
2215 case 5: /* task gate */
2216 if (dpl < cpl || dpl < rpl) {
2217 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2219 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2220 CC_OP = CC_OP_EFLAGS;
2221 return;
2222 case 4: /* 286 call gate */
2223 case 12: /* 386 call gate */
2224 break;
2225 default:
2226 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2227 break;
2229 shift = type >> 3;
2231 if (dpl < cpl || dpl < rpl) {
2232 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2234 /* check valid bit */
2235 if (!(e2 & DESC_P_MASK)) {
2236 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2238 selector = e1 >> 16;
2239 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2240 param_count = e2 & 0x1f;
2241 if ((selector & 0xfffc) == 0) {
2242 raise_exception_err(env, EXCP0D_GPF, 0);
2245 if (load_segment(&e1, &e2, selector) != 0) {
2246 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2248 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
2249 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2251 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2252 if (dpl > cpl) {
2253 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
2255 if (!(e2 & DESC_P_MASK)) {
2256 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
2259 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2260 /* to inner privilege */
2261 get_ss_esp_from_tss(&ss, &sp, dpl);
2262 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
2263 "\n",
2264 ss, sp, param_count, ESP);
2265 if ((ss & 0xfffc) == 0) {
2266 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2268 if ((ss & 3) != dpl) {
2269 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2271 if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
2272 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2274 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2275 if (ss_dpl != dpl) {
2276 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2278 if (!(ss_e2 & DESC_S_MASK) ||
2279 (ss_e2 & DESC_CS_MASK) ||
2280 !(ss_e2 & DESC_W_MASK)) {
2281 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2283 if (!(ss_e2 & DESC_P_MASK)) {
2284 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
2287 /* push_size = ((param_count * 2) + 8) << shift; */
2289 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2290 old_ssp = env->segs[R_SS].base;
2292 sp_mask = get_sp_mask(ss_e2);
2293 ssp = get_seg_base(ss_e1, ss_e2);
2294 if (shift) {
2295 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2296 PUSHL(ssp, sp, sp_mask, ESP);
2297 for (i = param_count - 1; i >= 0; i--) {
2298 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2299 PUSHL(ssp, sp, sp_mask, val);
2301 } else {
2302 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2303 PUSHW(ssp, sp, sp_mask, ESP);
2304 for (i = param_count - 1; i >= 0; i--) {
2305 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2306 PUSHW(ssp, sp, sp_mask, val);
2309 new_stack = 1;
2310 } else {
2311 /* to same privilege */
2312 sp = ESP;
2313 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2314 ssp = env->segs[R_SS].base;
2315 /* push_size = (4 << shift); */
2316 new_stack = 0;
2319 if (shift) {
2320 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2321 PUSHL(ssp, sp, sp_mask, next_eip);
2322 } else {
2323 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2324 PUSHW(ssp, sp, sp_mask, next_eip);
2327 /* from this point, not restartable */
2329 if (new_stack) {
2330 ss = (ss & ~3) | dpl;
2331 cpu_x86_load_seg_cache(env, R_SS, ss,
2332 ssp,
2333 get_seg_limit(ss_e1, ss_e2),
2334 ss_e2);
2337 selector = (selector & ~3) | dpl;
2338 cpu_x86_load_seg_cache(env, R_CS, selector,
2339 get_seg_base(e1, e2),
2340 get_seg_limit(e1, e2),
2341 e2);
2342 cpu_x86_set_cpl(env, dpl);
2343 SET_ESP(sp, sp_mask);
2344 EIP = offset;
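/* Summary of the gate path above when it targets a more privileged,
   non-conforming segment (dpl < cpl): SS:ESP for the target privilege
   level is fetched from the TSS, the caller's SS:ESP is pushed on the
   new stack, param_count words (or dwords for a 386 gate) are copied
   over from the old stack, the return CS:EIP is pushed, and finally
   CS:EIP is loaded from the gate with the CPL set to dpl. */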
2348 /* real and vm86 mode iret */
2349 void helper_iret_real(int shift)
2351 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2352 target_ulong ssp;
2353 int eflags_mask;
2355 sp_mask = 0xffff; /* XXX: use SS segment size? */
2356 sp = ESP;
2357 ssp = env->segs[R_SS].base;
2358 if (shift == 1) {
2359 /* 32 bits */
2360 POPL(ssp, sp, sp_mask, new_eip);
2361 POPL(ssp, sp, sp_mask, new_cs);
2362 new_cs &= 0xffff;
2363 POPL(ssp, sp, sp_mask, new_eflags);
2364 } else {
2365 /* 16 bits */
2366 POPW(ssp, sp, sp_mask, new_eip);
2367 POPW(ssp, sp, sp_mask, new_cs);
2368 POPW(ssp, sp, sp_mask, new_eflags);
2370 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2371 env->segs[R_CS].selector = new_cs;
2372 env->segs[R_CS].base = (new_cs << 4);
2373 env->eip = new_eip;
2374 if (env->eflags & VM_MASK) {
2375 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2376 NT_MASK;
2377 } else {
2378 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2379 RF_MASK | NT_MASK;
2381 if (shift == 0) {
2382 eflags_mask &= 0xffff;
2384 cpu_load_eflags(env, new_eflags, eflags_mask);
2385 env->hflags2 &= ~HF2_NMI_MASK;
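/* Note on the masks above: in vm86 mode IOPL is not writable by iret,
   and a 16-bit iret (shift == 0) only updates the low 16 bits of EFLAGS
   because eflags_mask is truncated to 0xffff. */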
2388 static inline void validate_seg(int seg_reg, int cpl)
2390 int dpl;
2391 uint32_t e2;
2393 /* XXX: on x86_64, we do not want to nullify FS and GS because
2394 they may still contain a valid base. I would be interested to
2395 know how a real x86_64 CPU behaves */
2396 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2397 (env->segs[seg_reg].selector & 0xfffc) == 0) {
2398 return;
2401 e2 = env->segs[seg_reg].flags;
2402 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2403 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2404 /* data or non conforming code segment */
2405 if (dpl < cpl) {
2406 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
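/* validate_seg() implements the data-segment check performed when
   returning to an outer privilege level: a data segment or non-conforming
   code segment whose DPL is below the new CPL is nullified so the less
   privileged code cannot keep using it (FS/GS with a null selector are
   left alone, as the comment above explains). */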
2411 /* protected mode iret */
2412 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2414 uint32_t new_cs, new_eflags, new_ss;
2415 uint32_t new_es, new_ds, new_fs, new_gs;
2416 uint32_t e1, e2, ss_e1, ss_e2;
2417 int cpl, dpl, rpl, eflags_mask, iopl;
2418 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2420 #ifdef TARGET_X86_64
2421 if (shift == 2) {
2422 sp_mask = -1;
2423 } else
2424 #endif
2426 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2428 sp = ESP;
2429 ssp = env->segs[R_SS].base;
2430 new_eflags = 0; /* avoid warning */
2431 #ifdef TARGET_X86_64
2432 if (shift == 2) {
2433 POPQ(sp, new_eip);
2434 POPQ(sp, new_cs);
2435 new_cs &= 0xffff;
2436 if (is_iret) {
2437 POPQ(sp, new_eflags);
2439 } else
2440 #endif
2442 if (shift == 1) {
2443 /* 32 bits */
2444 POPL(ssp, sp, sp_mask, new_eip);
2445 POPL(ssp, sp, sp_mask, new_cs);
2446 new_cs &= 0xffff;
2447 if (is_iret) {
2448 POPL(ssp, sp, sp_mask, new_eflags);
2449 if (new_eflags & VM_MASK) {
2450 goto return_to_vm86;
2453 } else {
2454 /* 16 bits */
2455 POPW(ssp, sp, sp_mask, new_eip);
2456 POPW(ssp, sp, sp_mask, new_cs);
2457 if (is_iret) {
2458 POPW(ssp, sp, sp_mask, new_eflags);
2462 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2463 new_cs, new_eip, shift, addend);
2464 LOG_PCALL_STATE(env);
2465 if ((new_cs & 0xfffc) == 0) {
2466 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2468 if (load_segment(&e1, &e2, new_cs) != 0) {
2469 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2471 if (!(e2 & DESC_S_MASK) ||
2472 !(e2 & DESC_CS_MASK)) {
2473 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2475 cpl = env->hflags & HF_CPL_MASK;
2476 rpl = new_cs & 3;
2477 if (rpl < cpl) {
2478 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2480 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2481 if (e2 & DESC_C_MASK) {
2482 if (dpl > rpl) {
2483 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2485 } else {
2486 if (dpl != rpl) {
2487 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
2490 if (!(e2 & DESC_P_MASK)) {
2491 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
2494 sp += addend;
2495 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2496 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2497 /* return to same privilege level */
2498 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2499 get_seg_base(e1, e2),
2500 get_seg_limit(e1, e2),
2501 e2);
2502 } else {
2503 /* return to different privilege level */
2504 #ifdef TARGET_X86_64
2505 if (shift == 2) {
2506 POPQ(sp, new_esp);
2507 POPQ(sp, new_ss);
2508 new_ss &= 0xffff;
2509 } else
2510 #endif
2512 if (shift == 1) {
2513 /* 32 bits */
2514 POPL(ssp, sp, sp_mask, new_esp);
2515 POPL(ssp, sp, sp_mask, new_ss);
2516 new_ss &= 0xffff;
2517 } else {
2518 /* 16 bits */
2519 POPW(ssp, sp, sp_mask, new_esp);
2520 POPW(ssp, sp, sp_mask, new_ss);
2523 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2524 new_ss, new_esp);
2525 if ((new_ss & 0xfffc) == 0) {
2526 #ifdef TARGET_X86_64
2527 /* NULL ss is allowed in long mode if cpl != 3 */
2528 /* XXX: test CS64? */
2529 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2530 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2531 0, 0xffffffff,
2532 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2533 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2534 DESC_W_MASK | DESC_A_MASK);
2535 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
2536 } else
2537 #endif
2539 raise_exception_err(env, EXCP0D_GPF, 0);
2541 } else {
2542 if ((new_ss & 3) != rpl) {
2543 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2545 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
2546 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2548 if (!(ss_e2 & DESC_S_MASK) ||
2549 (ss_e2 & DESC_CS_MASK) ||
2550 !(ss_e2 & DESC_W_MASK)) {
2551 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2553 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2554 if (dpl != rpl) {
2555 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
2557 if (!(ss_e2 & DESC_P_MASK)) {
2558 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
2560 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2561 get_seg_base(ss_e1, ss_e2),
2562 get_seg_limit(ss_e1, ss_e2),
2563 ss_e2);
2566 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2567 get_seg_base(e1, e2),
2568 get_seg_limit(e1, e2),
2569 e2);
2570 cpu_x86_set_cpl(env, rpl);
2571 sp = new_esp;
2572 #ifdef TARGET_X86_64
2573 if (env->hflags & HF_CS64_MASK) {
2574 sp_mask = -1;
2575 } else
2576 #endif
2578 sp_mask = get_sp_mask(ss_e2);
2581 /* validate data segments */
2582 validate_seg(R_ES, rpl);
2583 validate_seg(R_DS, rpl);
2584 validate_seg(R_FS, rpl);
2585 validate_seg(R_GS, rpl);
2587 sp += addend;
2589 SET_ESP(sp, sp_mask);
2590 env->eip = new_eip;
2591 if (is_iret) {
2592 /* NOTE: 'cpl' is the _old_ CPL */
2593 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2594 if (cpl == 0) {
2595 eflags_mask |= IOPL_MASK;
2597 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2598 if (cpl <= iopl) {
2599 eflags_mask |= IF_MASK;
2601 if (shift == 0) {
2602 eflags_mask &= 0xffff;
2604 cpu_load_eflags(env, new_eflags, eflags_mask);
2606 return;
2608 return_to_vm86:
2609 POPL(ssp, sp, sp_mask, new_esp);
2610 POPL(ssp, sp, sp_mask, new_ss);
2611 POPL(ssp, sp, sp_mask, new_es);
2612 POPL(ssp, sp, sp_mask, new_ds);
2613 POPL(ssp, sp, sp_mask, new_fs);
2614 POPL(ssp, sp, sp_mask, new_gs);
2616 /* modify processor state */
2617 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2618 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2619 VIP_MASK);
2620 load_seg_vm(R_CS, new_cs & 0xffff);
2621 cpu_x86_set_cpl(env, 3);
2622 load_seg_vm(R_SS, new_ss & 0xffff);
2623 load_seg_vm(R_ES, new_es & 0xffff);
2624 load_seg_vm(R_DS, new_ds & 0xffff);
2625 load_seg_vm(R_FS, new_fs & 0xffff);
2626 load_seg_vm(R_GS, new_gs & 0xffff);
2628 env->eip = new_eip & 0xffff;
2629 ESP = new_esp;
2632 void helper_iret_protected(int shift, int next_eip)
2634 int tss_selector, type;
2635 uint32_t e1, e2;
2637 /* special case: nested task return through the TSS back link */
2638 if (env->eflags & NT_MASK) {
2639 #ifdef TARGET_X86_64
2640 if (env->hflags & HF_LMA_MASK) {
2641 raise_exception_err(env, EXCP0D_GPF, 0);
2643 #endif
2644 tss_selector = lduw_kernel(env->tr.base + 0);
2645 if (tss_selector & 4) {
2646 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2648 if (load_segment(&e1, &e2, tss_selector) != 0) {
2649 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2651 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2652 /* NOTE: we check both segment and busy TSS */
2653 if (type != 3) {
2654 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
2656 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2657 } else {
2658 helper_ret_protected(shift, 1, 0);
2660 env->hflags2 &= ~HF2_NMI_MASK;
2663 void helper_lret_protected(int shift, int addend)
2665 helper_ret_protected(shift, 0, addend);
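/* lret shares helper_ret_protected() with iret (is_iret = 0 here); the
   addend argument carries the immediate of a far return ("lret $n"), so
   those extra bytes are dropped from the stack after CS:EIP has been
   popped. */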
2668 void helper_sysenter(void)
2670 if (env->sysenter_cs == 0) {
2671 raise_exception_err(env, EXCP0D_GPF, 0);
2673 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2674 cpu_x86_set_cpl(env, 0);
2676 #ifdef TARGET_X86_64
2677 if (env->hflags & HF_LMA_MASK) {
2678 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2679 0, 0xffffffff,
2680 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2681 DESC_S_MASK |
2682 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2683 DESC_L_MASK);
2684 } else
2685 #endif
2687 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2688 0, 0xffffffff,
2689 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2690 DESC_S_MASK |
2691 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2693 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2694 0, 0xffffffff,
2695 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2696 DESC_S_MASK |
2697 DESC_W_MASK | DESC_A_MASK);
2698 ESP = env->sysenter_esp;
2699 EIP = env->sysenter_eip;
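/* SYSENTER uses a fixed flat layout derived from the SYSENTER MSRs
   (illustrative summary of the loads above):
     CS  <- sysenter_cs          (64-bit code segment when LMA is set)
     SS  <- sysenter_cs + 8      (flat writable data segment)
     ESP <- sysenter_esp
     EIP <- sysenter_eip
   with the CPL forced to 0 and VM/IF/RF cleared. */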
2702 void helper_sysexit(int dflag)
2704 int cpl;
2706 cpl = env->hflags & HF_CPL_MASK;
2707 if (env->sysenter_cs == 0 || cpl != 0) {
2708 raise_exception_err(env, EXCP0D_GPF, 0);
2710 cpu_x86_set_cpl(env, 3);
2711 #ifdef TARGET_X86_64
2712 if (dflag == 2) {
2713 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2714 3, 0, 0xffffffff,
2715 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2716 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2717 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2718 DESC_L_MASK);
2719 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2720 3, 0, 0xffffffff,
2721 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2722 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2723 DESC_W_MASK | DESC_A_MASK);
2724 } else
2725 #endif
2727 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2728 3, 0, 0xffffffff,
2729 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2730 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2731 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2732 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2733 3, 0, 0xffffffff,
2734 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2735 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2736 DESC_W_MASK | DESC_A_MASK);
2738 ESP = ECX;
2739 EIP = EDX;
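/* SYSEXIT mirrors the SYSENTER layout and returns to CPL 3 (summary of
   the loads above):
     32-bit:           CS <- sysenter_cs + 16,  SS <- sysenter_cs + 24
     64-bit (dflag=2): CS <- sysenter_cs + 32,  SS <- sysenter_cs + 40
     ESP <- ECX,  EIP <- EDX
   with RPL 3 forced into both selectors. */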
2742 #if defined(CONFIG_USER_ONLY)
2743 target_ulong helper_read_crN(int reg)
2745 return 0;
2748 void helper_write_crN(int reg, target_ulong t0)
2752 void helper_movl_drN_T0(int reg, target_ulong t0)
2755 #else
2756 target_ulong helper_read_crN(int reg)
2758 target_ulong val;
2760 cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
2761 switch (reg) {
2762 default:
2763 val = env->cr[reg];
2764 break;
2765 case 8:
2766 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2767 val = cpu_get_apic_tpr(env->apic_state);
2768 } else {
2769 val = env->v_tpr;
2771 break;
2773 return val;
2776 void helper_write_crN(int reg, target_ulong t0)
2778 cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
2779 switch (reg) {
2780 case 0:
2781 cpu_x86_update_cr0(env, t0);
2782 break;
2783 case 3:
2784 cpu_x86_update_cr3(env, t0);
2785 break;
2786 case 4:
2787 cpu_x86_update_cr4(env, t0);
2788 break;
2789 case 8:
2790 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2791 cpu_set_apic_tpr(env->apic_state, t0);
2793 env->v_tpr = t0 & 0x0f;
2794 break;
2795 default:
2796 env->cr[reg] = t0;
2797 break;
2801 void helper_movl_drN_T0(int reg, target_ulong t0)
2803 int i;
2805 if (reg < 4) {
2806 hw_breakpoint_remove(env, reg);
2807 env->dr[reg] = t0;
2808 hw_breakpoint_insert(env, reg);
2809 } else if (reg == 7) {
2810 for (i = 0; i < 4; i++) {
2811 hw_breakpoint_remove(env, i);
2813 env->dr[7] = t0;
2814 for (i = 0; i < 4; i++) {
2815 hw_breakpoint_insert(env, i);
2817 } else {
2818 env->dr[reg] = t0;
2821 #endif
2823 void helper_lmsw(target_ulong t0)
2825 /* only the 4 lower bits of CR0 are modified; PE cannot be cleared
2826 once it has been set. */
2827 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2828 helper_write_crN(0, t0);
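/* Worked example of the masking above (values purely illustrative):
     cr[0] = 0x80000011 (PG|ET|PE), t0 = 0x0008 (TS)
     (cr[0] & ~0xe) = 0x80000011,  (t0 & 0xf) = 0x8
     new CR0        = 0x80000019
   TS becomes set while PE stays set even though bit 0 of the LMSW
   operand is clear. */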
2831 void helper_invlpg(target_ulong addr)
2833 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
2834 tlb_flush_page(env, addr);
2837 void helper_rdtsc(void)
2839 uint64_t val;
2841 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2842 raise_exception(env, EXCP0D_GPF);
2844 cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
2846 val = cpu_get_tsc(env) + env->tsc_offset;
2847 EAX = (uint32_t)(val);
2848 EDX = (uint32_t)(val >> 32);
2851 void helper_rdtscp(void)
2853 helper_rdtsc();
2854 ECX = (uint32_t)(env->tsc_aux);
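/* The 64-bit counter is returned split across EDX:EAX, e.g.
   val = 0x0123456789abcdef gives EAX = 0x89abcdef and EDX = 0x01234567;
   RDTSCP additionally returns the TSC_AUX MSR in ECX. */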
2857 void helper_rdpmc(void)
2859 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2860 raise_exception(env, EXCP0D_GPF);
2862 cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
2864 /* currently unimplemented */
2865 qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
2866 raise_exception_err(env, EXCP06_ILLOP, 0);
2869 #if defined(CONFIG_USER_ONLY)
2870 void helper_wrmsr(void)
2874 void helper_rdmsr(void)
2877 #else
2878 void helper_wrmsr(void)
2880 uint64_t val;
2882 cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
2884 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2886 switch ((uint32_t)ECX) {
2887 case MSR_IA32_SYSENTER_CS:
2888 env->sysenter_cs = val & 0xffff;
2889 break;
2890 case MSR_IA32_SYSENTER_ESP:
2891 env->sysenter_esp = val;
2892 break;
2893 case MSR_IA32_SYSENTER_EIP:
2894 env->sysenter_eip = val;
2895 break;
2896 case MSR_IA32_APICBASE:
2897 cpu_set_apic_base(env->apic_state, val);
2898 break;
2899 case MSR_EFER:
2901 uint64_t update_mask;
2903 update_mask = 0;
2904 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
2905 update_mask |= MSR_EFER_SCE;
2907 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2908 update_mask |= MSR_EFER_LME;
2910 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
2911 update_mask |= MSR_EFER_FFXSR;
2913 if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
2914 update_mask |= MSR_EFER_NXE;
2916 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
2917 update_mask |= MSR_EFER_SVME;
2919 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
2920 update_mask |= MSR_EFER_FFXSR;
2922 cpu_load_efer(env, (env->efer & ~update_mask) |
2923 (val & update_mask));
2925 break;
2926 case MSR_STAR:
2927 env->star = val;
2928 break;
2929 case MSR_PAT:
2930 env->pat = val;
2931 break;
2932 case MSR_VM_HSAVE_PA:
2933 env->vm_hsave = val;
2934 break;
2935 #ifdef TARGET_X86_64
2936 case MSR_LSTAR:
2937 env->lstar = val;
2938 break;
2939 case MSR_CSTAR:
2940 env->cstar = val;
2941 break;
2942 case MSR_FMASK:
2943 env->fmask = val;
2944 break;
2945 case MSR_FSBASE:
2946 env->segs[R_FS].base = val;
2947 break;
2948 case MSR_GSBASE:
2949 env->segs[R_GS].base = val;
2950 break;
2951 case MSR_KERNELGSBASE:
2952 env->kernelgsbase = val;
2953 break;
2954 #endif
2955 case MSR_MTRRphysBase(0):
2956 case MSR_MTRRphysBase(1):
2957 case MSR_MTRRphysBase(2):
2958 case MSR_MTRRphysBase(3):
2959 case MSR_MTRRphysBase(4):
2960 case MSR_MTRRphysBase(5):
2961 case MSR_MTRRphysBase(6):
2962 case MSR_MTRRphysBase(7):
2963 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
2964 break;
2965 case MSR_MTRRphysMask(0):
2966 case MSR_MTRRphysMask(1):
2967 case MSR_MTRRphysMask(2):
2968 case MSR_MTRRphysMask(3):
2969 case MSR_MTRRphysMask(4):
2970 case MSR_MTRRphysMask(5):
2971 case MSR_MTRRphysMask(6):
2972 case MSR_MTRRphysMask(7):
2973 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
2974 break;
2975 case MSR_MTRRfix64K_00000:
2976 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
2977 break;
2978 case MSR_MTRRfix16K_80000:
2979 case MSR_MTRRfix16K_A0000:
2980 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
2981 break;
2982 case MSR_MTRRfix4K_C0000:
2983 case MSR_MTRRfix4K_C8000:
2984 case MSR_MTRRfix4K_D0000:
2985 case MSR_MTRRfix4K_D8000:
2986 case MSR_MTRRfix4K_E0000:
2987 case MSR_MTRRfix4K_E8000:
2988 case MSR_MTRRfix4K_F0000:
2989 case MSR_MTRRfix4K_F8000:
2990 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
2991 break;
2992 case MSR_MTRRdefType:
2993 env->mtrr_deftype = val;
2994 break;
2995 case MSR_MCG_STATUS:
2996 env->mcg_status = val;
2997 break;
2998 case MSR_MCG_CTL:
2999 if ((env->mcg_cap & MCG_CTL_P)
3000 && (val == 0 || val == ~(uint64_t)0)) {
3001 env->mcg_ctl = val;
3003 break;
3004 case MSR_TSC_AUX:
3005 env->tsc_aux = val;
3006 break;
3007 case MSR_IA32_MISC_ENABLE:
3008 env->msr_ia32_misc_enable = val;
3009 break;
3010 default:
3011 if ((uint32_t)ECX >= MSR_MC0_CTL
3012 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3013 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3014 if ((offset & 0x3) != 0
3015 || (val == 0 || val == ~(uint64_t)0)) {
3016 env->mce_banks[offset] = val;
3018 break;
3020 /* XXX: exception? */
3021 break;
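/* Note on the MTRR cases above: the variable-range MTRRs are laid out as
   base/mask pairs at consecutive MSR numbers (IA32_MTRR_PHYSBASE0 at
   0x200, IA32_MTRR_PHYSMASK0 at 0x201, and so on), so dividing the
   offset from the first MSR by two yields the mtrr_var[] index; e.g.
   ECX = MSR_MTRRphysMask(3) selects mtrr_var[3].mask. */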
3025 void helper_rdmsr(void)
3027 uint64_t val;
3029 cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
3031 switch ((uint32_t)ECX) {
3032 case MSR_IA32_SYSENTER_CS:
3033 val = env->sysenter_cs;
3034 break;
3035 case MSR_IA32_SYSENTER_ESP:
3036 val = env->sysenter_esp;
3037 break;
3038 case MSR_IA32_SYSENTER_EIP:
3039 val = env->sysenter_eip;
3040 break;
3041 case MSR_IA32_APICBASE:
3042 val = cpu_get_apic_base(env->apic_state);
3043 break;
3044 case MSR_EFER:
3045 val = env->efer;
3046 break;
3047 case MSR_STAR:
3048 val = env->star;
3049 break;
3050 case MSR_PAT:
3051 val = env->pat;
3052 break;
3053 case MSR_VM_HSAVE_PA:
3054 val = env->vm_hsave;
3055 break;
3056 case MSR_IA32_PERF_STATUS:
3057 /* tsc_increment_by_tick */
3058 val = 1000ULL;
3059 /* CPU multiplier */
3060 val |= (((uint64_t)4ULL) << 40);
3061 break;
3062 #ifdef TARGET_X86_64
3063 case MSR_LSTAR:
3064 val = env->lstar;
3065 break;
3066 case MSR_CSTAR:
3067 val = env->cstar;
3068 break;
3069 case MSR_FMASK:
3070 val = env->fmask;
3071 break;
3072 case MSR_FSBASE:
3073 val = env->segs[R_FS].base;
3074 break;
3075 case MSR_GSBASE:
3076 val = env->segs[R_GS].base;
3077 break;
3078 case MSR_KERNELGSBASE:
3079 val = env->kernelgsbase;
3080 break;
3081 case MSR_TSC_AUX:
3082 val = env->tsc_aux;
3083 break;
3084 #endif
3085 case MSR_MTRRphysBase(0):
3086 case MSR_MTRRphysBase(1):
3087 case MSR_MTRRphysBase(2):
3088 case MSR_MTRRphysBase(3):
3089 case MSR_MTRRphysBase(4):
3090 case MSR_MTRRphysBase(5):
3091 case MSR_MTRRphysBase(6):
3092 case MSR_MTRRphysBase(7):
3093 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3094 break;
3095 case MSR_MTRRphysMask(0):
3096 case MSR_MTRRphysMask(1):
3097 case MSR_MTRRphysMask(2):
3098 case MSR_MTRRphysMask(3):
3099 case MSR_MTRRphysMask(4):
3100 case MSR_MTRRphysMask(5):
3101 case MSR_MTRRphysMask(6):
3102 case MSR_MTRRphysMask(7):
3103 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3104 break;
3105 case MSR_MTRRfix64K_00000:
3106 val = env->mtrr_fixed[0];
3107 break;
3108 case MSR_MTRRfix16K_80000:
3109 case MSR_MTRRfix16K_A0000:
3110 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3111 break;
3112 case MSR_MTRRfix4K_C0000:
3113 case MSR_MTRRfix4K_C8000:
3114 case MSR_MTRRfix4K_D0000:
3115 case MSR_MTRRfix4K_D8000:
3116 case MSR_MTRRfix4K_E0000:
3117 case MSR_MTRRfix4K_E8000:
3118 case MSR_MTRRfix4K_F0000:
3119 case MSR_MTRRfix4K_F8000:
3120 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3121 break;
3122 case MSR_MTRRdefType:
3123 val = env->mtrr_deftype;
3124 break;
3125 case MSR_MTRRcap:
3126 if (env->cpuid_features & CPUID_MTRR) {
3127 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
3128 MSR_MTRRcap_WC_SUPPORTED;
3129 } else {
3130 /* XXX: exception? */
3131 val = 0;
3133 break;
3134 case MSR_MCG_CAP:
3135 val = env->mcg_cap;
3136 break;
3137 case MSR_MCG_CTL:
3138 if (env->mcg_cap & MCG_CTL_P) {
3139 val = env->mcg_ctl;
3140 } else {
3141 val = 0;
3143 break;
3144 case MSR_MCG_STATUS:
3145 val = env->mcg_status;
3146 break;
3147 case MSR_IA32_MISC_ENABLE:
3148 val = env->msr_ia32_misc_enable;
3149 break;
3150 default:
3151 if ((uint32_t)ECX >= MSR_MC0_CTL
3152 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3153 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3154 val = env->mce_banks[offset];
3155 break;
3157 /* XXX: exception? */
3158 val = 0;
3159 break;
3161 EAX = (uint32_t)(val);
3162 EDX = (uint32_t)(val >> 32);
3164 #endif
3166 target_ulong helper_lsl(target_ulong selector1)
3168 unsigned int limit;
3169 uint32_t e1, e2, eflags, selector;
3170 int rpl, dpl, cpl, type;
3172 selector = selector1 & 0xffff;
3173 eflags = helper_cc_compute_all(CC_OP);
3174 if ((selector & 0xfffc) == 0) {
3175 goto fail;
3177 if (load_segment(&e1, &e2, selector) != 0) {
3178 goto fail;
3180 rpl = selector & 3;
3181 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3182 cpl = env->hflags & HF_CPL_MASK;
3183 if (e2 & DESC_S_MASK) {
3184 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3185 /* conforming */
3186 } else {
3187 if (dpl < cpl || dpl < rpl) {
3188 goto fail;
3191 } else {
3192 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3193 switch (type) {
3194 case 1:
3195 case 2:
3196 case 3:
3197 case 9:
3198 case 11:
3199 break;
3200 default:
3201 goto fail;
3203 if (dpl < cpl || dpl < rpl) {
3204 fail:
3205 CC_SRC = eflags & ~CC_Z;
3206 return 0;
3209 limit = get_seg_limit(e1, e2);
3210 CC_SRC = eflags | CC_Z;
3211 return limit;
3214 target_ulong helper_lar(target_ulong selector1)
3216 uint32_t e1, e2, eflags, selector;
3217 int rpl, dpl, cpl, type;
3219 selector = selector1 & 0xffff;
3220 eflags = helper_cc_compute_all(CC_OP);
3221 if ((selector & 0xfffc) == 0) {
3222 goto fail;
3224 if (load_segment(&e1, &e2, selector) != 0) {
3225 goto fail;
3227 rpl = selector & 3;
3228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3229 cpl = env->hflags & HF_CPL_MASK;
3230 if (e2 & DESC_S_MASK) {
3231 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3232 /* conforming */
3233 } else {
3234 if (dpl < cpl || dpl < rpl) {
3235 goto fail;
3238 } else {
3239 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3240 switch (type) {
3241 case 1:
3242 case 2:
3243 case 3:
3244 case 4:
3245 case 5:
3246 case 9:
3247 case 11:
3248 case 12:
3249 break;
3250 default:
3251 goto fail;
3253 if (dpl < cpl || dpl < rpl) {
3254 fail:
3255 CC_SRC = eflags & ~CC_Z;
3256 return 0;
3259 CC_SRC = eflags | CC_Z;
3260 return e2 & 0x00f0ff00;
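/* Both helper_lsl() and helper_lar() report success through ZF, which is
   materialized here via CC_SRC: ZF set means the returned value is valid,
   ZF clear means the selector was rejected and 0 is returned.  LAR masks
   e2 with 0x00f0ff00 so only the access-rights byte and the flags nibble
   of the descriptor are made visible to the guest. */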
3263 void helper_verr(target_ulong selector1)
3265 uint32_t e1, e2, eflags, selector;
3266 int rpl, dpl, cpl;
3268 selector = selector1 & 0xffff;
3269 eflags = helper_cc_compute_all(CC_OP);
3270 if ((selector & 0xfffc) == 0) {
3271 goto fail;
3273 if (load_segment(&e1, &e2, selector) != 0) {
3274 goto fail;
3276 if (!(e2 & DESC_S_MASK)) {
3277 goto fail;
3279 rpl = selector & 3;
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281 cpl = env->hflags & HF_CPL_MASK;
3282 if (e2 & DESC_CS_MASK) {
3283 if (!(e2 & DESC_R_MASK)) {
3284 goto fail;
3286 if (!(e2 & DESC_C_MASK)) {
3287 if (dpl < cpl || dpl < rpl) {
3288 goto fail;
3291 } else {
3292 if (dpl < cpl || dpl < rpl) {
3293 fail:
3294 CC_SRC = eflags & ~CC_Z;
3295 return;
3298 CC_SRC = eflags | CC_Z;
3301 void helper_verw(target_ulong selector1)
3303 uint32_t e1, e2, eflags, selector;
3304 int rpl, dpl, cpl;
3306 selector = selector1 & 0xffff;
3307 eflags = helper_cc_compute_all(CC_OP);
3308 if ((selector & 0xfffc) == 0) {
3309 goto fail;
3311 if (load_segment(&e1, &e2, selector) != 0) {
3312 goto fail;
3314 if (!(e2 & DESC_S_MASK)) {
3315 goto fail;
3317 rpl = selector & 3;
3318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3319 cpl = env->hflags & HF_CPL_MASK;
3320 if (e2 & DESC_CS_MASK) {
3321 goto fail;
3322 } else {
3323 if (dpl < cpl || dpl < rpl) {
3324 goto fail;
3326 if (!(e2 & DESC_W_MASK)) {
3327 fail:
3328 CC_SRC = eflags & ~CC_Z;
3329 return;
3332 CC_SRC = eflags | CC_Z;
3335 #if defined(CONFIG_USER_ONLY)
3336 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
3338 CPUX86State *saved_env;
3340 saved_env = env;
3341 env = s;
3342 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
3343 selector &= 0xffff;
3344 cpu_x86_load_seg_cache(env, seg_reg, selector,
3345 (selector << 4), 0xffff, 0);
3346 } else {
3347 helper_load_seg(seg_reg, selector);
3349 env = saved_env;
3351 #endif
3353 static void do_hlt(void)
3355 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3356 env->halted = 1;
3357 env->exception_index = EXCP_HLT;
3358 cpu_loop_exit(env);
3361 void helper_hlt(int next_eip_addend)
3363 cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
3364 EIP += next_eip_addend;
3366 do_hlt();
3369 void helper_monitor(target_ulong ptr)
3371 if ((uint32_t)ECX != 0) {
3372 raise_exception(env, EXCP0D_GPF);
3374 /* XXX: store address? */
3375 cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
3378 void helper_mwait(int next_eip_addend)
3380 if ((uint32_t)ECX != 0) {
3381 raise_exception(env, EXCP0D_GPF);
3383 cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
3384 EIP += next_eip_addend;
3386 /* XXX: not complete but not completely erroneous */
3387 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3388 /* more than one CPU: do not sleep because another CPU may
3389 wake this one */
3390 } else {
3391 do_hlt();
3395 void helper_debug(void)
3397 env->exception_index = EXCP_DEBUG;
3398 cpu_loop_exit(env);
3401 void helper_boundw(target_ulong a0, int v)
3403 int low, high;
3405 low = ldsw(a0);
3406 high = ldsw(a0 + 2);
3407 v = (int16_t)v;
3408 if (v < low || v > high) {
3409 raise_exception(env, EXCP05_BOUND);
3413 void helper_boundl(target_ulong a0, int v)
3415 int low, high;
3417 low = ldl(a0);
3418 high = ldl(a0 + 4);
3419 if (v < low || v > high) {
3420 raise_exception(env, EXCP05_BOUND);
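/* BOUND reads a signed (lower, upper) pair at a0 and raises #BR
   (EXCP05_BOUND) when the index is outside [lower, upper]; boundw uses
   16-bit bounds and boundl 32-bit ones.  A minimal standalone sketch of
   the 32-bit check (hypothetical helper, illustration only):
*/
#if 0
static int bound_check32(int32_t lower, int32_t upper, int32_t v)
{
    return (v < lower || v > upper);   /* non-zero -> #BR would be raised */
}
#endif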
3424 #if !defined(CONFIG_USER_ONLY)
3426 #define MMUSUFFIX _mmu
3428 #define SHIFT 0
3429 #include "softmmu_template.h"
3431 #define SHIFT 1
3432 #include "softmmu_template.h"
3434 #define SHIFT 2
3435 #include "softmmu_template.h"
3437 #define SHIFT 3
3438 #include "softmmu_template.h"
3440 #endif
3442 #if !defined(CONFIG_USER_ONLY)
3443 /* Try to fill the TLB and raise an exception on error. If retaddr is
3444 NULL, the function was called from C code (i.e. not from generated
3445 code or from helper.c). */
3446 /* XXX: fix it to restore all registers */
3447 void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
3448 uintptr_t retaddr)
3450 TranslationBlock *tb;
3451 int ret;
3452 CPUX86State *saved_env;
3454 saved_env = env;
3455 env = env1;
3457 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
3458 if (ret) {
3459 if (retaddr) {
3460 /* now we have a real cpu fault */
3461 tb = tb_find_pc(retaddr);
3462 if (tb) {
3463 /* the PC is inside the translated code. It means that we have
3464 a virtual CPU fault */
3465 cpu_restore_state(tb, env, retaddr);
3468 raise_exception_err(env, env->exception_index, env->error_code);
3470 env = saved_env;
3472 #endif