qemu/kevin.git: target-i386/op_helper.c
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include <math.h>
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "ioport.h"
25 #include "qemu-log.h"
26 #include "cpu-defs.h"
27 #include "helper.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "softmmu_exec.h"
31 #endif /* !defined(CONFIG_USER_ONLY) */
33 //#define DEBUG_PCALL
35 #ifdef DEBUG_PCALL
36 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
37 # define LOG_PCALL_STATE(env) \
38 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
39 #else
40 # define LOG_PCALL(...) do { } while (0)
41 # define LOG_PCALL_STATE(env) do { } while (0)
42 #endif
44 /* n must be a constant to be efficient */
45 static inline target_long lshift(target_long x, int n)
47 if (n >= 0) {
48 return x << n;
49 } else {
50 return x >> (-n);
54 #define FPU_RC_MASK 0xc00
55 #define FPU_RC_NEAR 0x000
56 #define FPU_RC_DOWN 0x400
57 #define FPU_RC_UP 0x800
58 #define FPU_RC_CHOP 0xc00
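/* FPU_RC_* select the rounding-control field (bits 10-11) of the x87
   control word: round to nearest even, down, up, or toward zero. */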
60 #define MAXTAN 9223372036854775808.0
62 /* the following deal with x86 long double-precision numbers */
63 #define MAXEXPD 0x7fff
64 #define EXPBIAS 16383
65 #define EXPD(fp) (fp.l.upper & 0x7fff)
66 #define SIGND(fp) ((fp.l.upper) & 0x8000)
67 #define MANTD(fp) (fp.l.lower)
68 #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
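/* The macros above decode the 80-bit extended-precision layout of the
   CPU_LDoubleU union used below: l.lower holds the 64-bit significand
   (explicit integer bit included), l.upper holds the sign bit (bit 15)
   and the 15-bit exponent biased by 16383. */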
70 static inline void fpush(void)
72 env->fpstt = (env->fpstt - 1) & 7;
73 env->fptags[env->fpstt] = 0; /* validate stack entry */
76 static inline void fpop(void)
78 env->fptags[env->fpstt] = 1; /* invalidate stack entry */
79 env->fpstt = (env->fpstt + 1) & 7;
82 static inline floatx80 helper_fldt(target_ulong ptr)
84 CPU_LDoubleU temp;
86 temp.l.lower = ldq(ptr);
87 temp.l.upper = lduw(ptr + 8);
88 return temp.d;
91 static inline void helper_fstt(floatx80 f, target_ulong ptr)
93 CPU_LDoubleU temp;
95 temp.d = f;
96 stq(ptr, temp.l.lower);
97 stw(ptr + 8, temp.l.upper);
100 #define FPUS_IE (1 << 0)
101 #define FPUS_DE (1 << 1)
102 #define FPUS_ZE (1 << 2)
103 #define FPUS_OE (1 << 3)
104 #define FPUS_UE (1 << 4)
105 #define FPUS_PE (1 << 5)
106 #define FPUS_SF (1 << 6)
107 #define FPUS_SE (1 << 7)
108 #define FPUS_B (1 << 15)
110 #define FPUC_EM 0x3f
112 static inline uint32_t compute_eflags(void)
114 return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
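/* QEMU keeps the condition codes lazily: env->eflags holds only the
   non-arithmetic bits, the CC_* flags are recomputed on demand from
   CC_OP/CC_SRC/CC_DST by helper_cc_compute_all(), and DF is stored
   separately.  compute_eflags() therefore merges all three sources. */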
117 /* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
118 static inline void load_eflags(int eflags, int update_mask)
120 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
121 DF = 1 - (2 * ((eflags >> 10) & 1));
122 env->eflags = (env->eflags & ~update_mask) |
123 (eflags & update_mask) | 0x2;
126 /* load efer and update the corresponding hflags. XXX: do consistency
127 checks with cpuid bits ? */
128 static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
130 env->efer = val;
131 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
132 if (env->efer & MSR_EFER_LMA) {
133 env->hflags |= HF_LMA_MASK;
135 if (env->efer & MSR_EFER_SVME) {
136 env->hflags |= HF_SVME_MASK;
140 #if 0
141 #define raise_exception_err(a, b)\
142 do {\
143 qemu_log("raise_exception line=%d\n", __LINE__);\
144 (raise_exception_err)(a, b);\
145 } while (0)
146 #endif
148 static void QEMU_NORETURN raise_exception_err(int exception_index,
149 int error_code);
151 static const uint8_t parity_table[256] = {
152 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
153 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
154 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
155 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
156 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
157 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
158 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
159 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
160 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
161 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
162 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
163 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
164 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
165 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
166 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
167 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
168 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
169 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
170 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
171 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
172 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
173 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
174 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
175 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
176 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
177 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
178 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
179 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
180 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
181 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
182 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
183 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
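/* PF reflects the parity of the low 8 bits of a result, so the flag
   helpers simply OR in parity_table[result & 0xff], which is CC_P
   exactly when that byte contains an even number of set bits. */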
186 /* modulo 17 table */
187 static const uint8_t rclw_table[32] = {
188 0, 1, 2, 3, 4, 5, 6, 7,
189 8, 9,10,11,12,13,14,15,
190 16, 0, 1, 2, 3, 4, 5, 6,
191 7, 8, 9,10,11,12,13,14,
194 /* modulo 9 table */
195 static const uint8_t rclb_table[32] = {
196 0, 1, 2, 3, 4, 5, 6, 7,
197 8, 0, 1, 2, 3, 4, 5, 6,
198 7, 8, 0, 1, 2, 3, 4, 5,
199 6, 7, 8, 0, 1, 2, 3, 4,
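/* RCL/RCR rotate through CF, so the effective count is taken modulo 17
   for 16-bit operands and modulo 9 for 8-bit operands; these tables
   precompute that reduction for shift counts 0..31. */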
202 #define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
203 #define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
204 #define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
206 /* broken thread support */
208 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
210 void helper_lock(void)
212 spin_lock(&global_cpu_lock);
215 void helper_unlock(void)
217 spin_unlock(&global_cpu_lock);
220 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
222 load_eflags(t0, update_mask);
225 target_ulong helper_read_eflags(void)
227 uint32_t eflags;
228 eflags = helper_cc_compute_all(CC_OP);
229 eflags |= (DF & DF_MASK);
230 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
231 return eflags;
234 /* return non-zero on error */
235 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
236 int selector)
238 SegmentCache *dt;
239 int index;
240 target_ulong ptr;
242 if (selector & 0x4)
243 dt = &env->ldt;
244 else
245 dt = &env->gdt;
246 index = selector & ~7;
247 if ((index + 7) > dt->limit)
248 return -1;
249 ptr = dt->base + index;
250 *e1_ptr = ldl_kernel(ptr);
251 *e2_ptr = ldl_kernel(ptr + 4);
252 return 0;
255 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257 unsigned int limit;
258 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
259 if (e2 & DESC_G_MASK)
260 limit = (limit << 12) | 0xfff;
261 return limit;
264 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
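/* Segment descriptors scatter base and limit across the two descriptor
   words: the limit is e1[15:0] | e2[19:16] (scaled by 4K pages when the
   granularity bit is set), and the base is e1[31:16] | (e2[7:0] << 16) |
   (e2[31:24] << 24), which the two helpers above reassemble. */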
269 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
271 sc->base = get_seg_base(e1, e2);
272 sc->limit = get_seg_limit(e1, e2);
273 sc->flags = e2;
276 /* init the segment cache in vm86 mode. */
277 static inline void load_seg_vm(int seg, int selector)
279 selector &= 0xffff;
280 cpu_x86_load_seg_cache(env, seg, selector,
281 (selector << 4), 0xffff, 0);
284 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
285 uint32_t *esp_ptr, int dpl)
287 int type, index, shift;
289 #if 0
291 int i;
292 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
293 for(i=0;i<env->tr.limit;i++) {
294 printf("%02x ", env->tr.base[i]);
295 if ((i & 7) == 7) printf("\n");
297 printf("\n");
299 #endif
301 if (!(env->tr.flags & DESC_P_MASK))
302 cpu_abort(env, "invalid tss");
303 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
304 if ((type & 7) != 1)
305 cpu_abort(env, "invalid tss type");
306 shift = type >> 3;
307 index = (dpl * 4 + 2) << shift;
308 if (index + (4 << shift) - 1 > env->tr.limit)
309 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
310 if (shift == 0) {
311 *esp_ptr = lduw_kernel(env->tr.base + index);
312 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
313 } else {
314 *esp_ptr = ldl_kernel(env->tr.base + index);
315 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
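/* In a 32-bit TSS the (ESP, SS) pair for privilege level n lives at
   offset 4 + 8 * n (4-byte ESP followed by the SS selector); in a
   16-bit TSS it is at offset 2 + 4 * n.  The (dpl * 4 + 2) << shift
   computation above yields exactly those offsets. */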
319 /* XXX: merge with load_seg() */
320 static void tss_load_seg(int seg_reg, int selector)
322 uint32_t e1, e2;
323 int rpl, dpl, cpl;
325 if ((selector & 0xfffc) != 0) {
326 if (load_segment(&e1, &e2, selector) != 0)
327 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
328 if (!(e2 & DESC_S_MASK))
329 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
330 rpl = selector & 3;
331 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
332 cpl = env->hflags & HF_CPL_MASK;
333 if (seg_reg == R_CS) {
334 if (!(e2 & DESC_CS_MASK))
335 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
336 /* XXX: is it correct ? */
337 if (dpl != rpl)
338 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
339 if ((e2 & DESC_C_MASK) && dpl > rpl)
340 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
341 } else if (seg_reg == R_SS) {
342 /* SS must be writable data */
343 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
344 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
345 if (dpl != cpl || dpl != rpl)
346 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
347 } else {
348 /* not readable code */
349 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
350 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
351 /* if data or non-conforming code, check the rights */
352 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
353 if (dpl < cpl || dpl < rpl)
354 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
357 if (!(e2 & DESC_P_MASK))
358 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
359 cpu_x86_load_seg_cache(env, seg_reg, selector,
360 get_seg_base(e1, e2),
361 get_seg_limit(e1, e2),
362 e2);
363 } else {
364 if (seg_reg == R_SS || seg_reg == R_CS)
365 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
369 #define SWITCH_TSS_JMP 0
370 #define SWITCH_TSS_IRET 1
371 #define SWITCH_TSS_CALL 2
373 /* XXX: restore CPU state in registers (PowerPC case) */
374 static void switch_tss(int tss_selector,
375 uint32_t e1, uint32_t e2, int source,
376 uint32_t next_eip)
378 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
379 target_ulong tss_base;
380 uint32_t new_regs[8], new_segs[6];
381 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
382 uint32_t old_eflags, eflags_mask;
383 SegmentCache *dt;
384 int index;
385 target_ulong ptr;
387 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
388 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
390 /* if task gate, we read the TSS segment and we load it */
391 if (type == 5) {
392 if (!(e2 & DESC_P_MASK))
393 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
394 tss_selector = e1 >> 16;
395 if (tss_selector & 4)
396 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
397 if (load_segment(&e1, &e2, tss_selector) != 0)
398 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
399 if (e2 & DESC_S_MASK)
400 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
401 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
402 if ((type & 7) != 1)
403 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
406 if (!(e2 & DESC_P_MASK))
407 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
409 if (type & 8)
410 tss_limit_max = 103;
411 else
412 tss_limit_max = 43;
413 tss_limit = get_seg_limit(e1, e2);
414 tss_base = get_seg_base(e1, e2);
415 if ((tss_selector & 4) != 0 ||
416 tss_limit < tss_limit_max)
417 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
418 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
419 if (old_type & 8)
420 old_tss_limit_max = 103;
421 else
422 old_tss_limit_max = 43;
424 /* read all the registers from the new TSS */
425 if (type & 8) {
426 /* 32 bit */
427 new_cr3 = ldl_kernel(tss_base + 0x1c);
428 new_eip = ldl_kernel(tss_base + 0x20);
429 new_eflags = ldl_kernel(tss_base + 0x24);
430 for(i = 0; i < 8; i++)
431 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
432 for(i = 0; i < 6; i++)
433 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
434 new_ldt = lduw_kernel(tss_base + 0x60);
435 new_trap = ldl_kernel(tss_base + 0x64);
436 } else {
437 /* 16 bit */
438 new_cr3 = 0;
439 new_eip = lduw_kernel(tss_base + 0x0e);
440 new_eflags = lduw_kernel(tss_base + 0x10);
441 for(i = 0; i < 8; i++)
442 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
443 for(i = 0; i < 4; i++)
444 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
445 new_ldt = lduw_kernel(tss_base + 0x2a);
446 new_segs[R_FS] = 0;
447 new_segs[R_GS] = 0;
448 new_trap = 0;
450 /* XXX: avoid a compiler warning, see
451 http://support.amd.com/us/Processor_TechDocs/24593.pdf
452 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
453 (void)new_trap;
455 /* NOTE: we must avoid memory exceptions during the task switch,
456 so we make dummy accesses before */
457 /* XXX: it can still fail in some cases, so a bigger hack is
458 necessary to validate the TLB after having done the accesses */
460 v1 = ldub_kernel(env->tr.base);
461 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
462 stb_kernel(env->tr.base, v1);
463 stb_kernel(env->tr.base + old_tss_limit_max, v2);
465 /* clear busy bit (it is restartable) */
466 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
467 target_ulong ptr;
468 uint32_t e2;
469 ptr = env->gdt.base + (env->tr.selector & ~7);
470 e2 = ldl_kernel(ptr + 4);
471 e2 &= ~DESC_TSS_BUSY_MASK;
472 stl_kernel(ptr + 4, e2);
474 old_eflags = compute_eflags();
475 if (source == SWITCH_TSS_IRET)
476 old_eflags &= ~NT_MASK;
478 /* save the current state in the old TSS */
479 if (type & 8) {
480 /* 32 bit */
481 stl_kernel(env->tr.base + 0x20, next_eip);
482 stl_kernel(env->tr.base + 0x24, old_eflags);
483 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
484 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
485 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
486 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
487 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
488 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
489 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
490 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
491 for(i = 0; i < 6; i++)
492 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
493 } else {
494 /* 16 bit */
495 stw_kernel(env->tr.base + 0x0e, next_eip);
496 stw_kernel(env->tr.base + 0x10, old_eflags);
497 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
498 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
499 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
500 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
501 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
502 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
503 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
504 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
505 for(i = 0; i < 4; i++)
506 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
509 /* now if an exception occurs, it will occur in the next task
510 context */
512 if (source == SWITCH_TSS_CALL) {
513 stw_kernel(tss_base, env->tr.selector);
514 new_eflags |= NT_MASK;
517 /* set busy bit */
518 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
519 target_ulong ptr;
520 uint32_t e2;
521 ptr = env->gdt.base + (tss_selector & ~7);
522 e2 = ldl_kernel(ptr + 4);
523 e2 |= DESC_TSS_BUSY_MASK;
524 stl_kernel(ptr + 4, e2);
527 /* set the new CPU state */
528 /* from this point, any exception which occurs can give problems */
529 env->cr[0] |= CR0_TS_MASK;
530 env->hflags |= HF_TS_MASK;
531 env->tr.selector = tss_selector;
532 env->tr.base = tss_base;
533 env->tr.limit = tss_limit;
534 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
536 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
537 cpu_x86_update_cr3(env, new_cr3);
540 /* load all registers without taking an exception, then reload them
541 with possible exceptions */
542 env->eip = new_eip;
543 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
544 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
545 if (!(type & 8))
546 eflags_mask &= 0xffff;
547 load_eflags(new_eflags, eflags_mask);
548 /* XXX: what to do in 16 bit case ? */
549 EAX = new_regs[0];
550 ECX = new_regs[1];
551 EDX = new_regs[2];
552 EBX = new_regs[3];
553 ESP = new_regs[4];
554 EBP = new_regs[5];
555 ESI = new_regs[6];
556 EDI = new_regs[7];
557 if (new_eflags & VM_MASK) {
558 for(i = 0; i < 6; i++)
559 load_seg_vm(i, new_segs[i]);
560 /* in vm86, CPL is always 3 */
561 cpu_x86_set_cpl(env, 3);
562 } else {
563 /* CPL is set to the RPL of CS */
564 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
565 /* load just the selectors first, as the rest may trigger exceptions */
566 for(i = 0; i < 6; i++)
567 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
570 env->ldt.selector = new_ldt & ~4;
571 env->ldt.base = 0;
572 env->ldt.limit = 0;
573 env->ldt.flags = 0;
575 /* load the LDT */
576 if (new_ldt & 4)
577 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
579 if ((new_ldt & 0xfffc) != 0) {
580 dt = &env->gdt;
581 index = new_ldt & ~7;
582 if ((index + 7) > dt->limit)
583 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
584 ptr = dt->base + index;
585 e1 = ldl_kernel(ptr);
586 e2 = ldl_kernel(ptr + 4);
587 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
588 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
589 if (!(e2 & DESC_P_MASK))
590 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
591 load_seg_cache_raw_dt(&env->ldt, e1, e2);
594 /* load the segments */
595 if (!(new_eflags & VM_MASK)) {
596 tss_load_seg(R_CS, new_segs[R_CS]);
597 tss_load_seg(R_SS, new_segs[R_SS]);
598 tss_load_seg(R_ES, new_segs[R_ES]);
599 tss_load_seg(R_DS, new_segs[R_DS]);
600 tss_load_seg(R_FS, new_segs[R_FS]);
601 tss_load_seg(R_GS, new_segs[R_GS]);
604 /* check that EIP is in the CS segment limits */
605 if (new_eip > env->segs[R_CS].limit) {
606 /* XXX: different exception if CALL ? */
607 raise_exception_err(EXCP0D_GPF, 0);
610 #ifndef CONFIG_USER_ONLY
611 /* reset local breakpoints */
612 if (env->dr[7] & 0x55) {
613 for (i = 0; i < 4; i++) {
614 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
615 hw_breakpoint_remove(env, i);
617 env->dr[7] &= ~0x55;
619 #endif
622 /* check if Port I/O is allowed in TSS */
623 static inline void check_io(int addr, int size)
625 int io_offset, val, mask;
627 /* TSS must be a valid 32 bit one */
628 if (!(env->tr.flags & DESC_P_MASK) ||
629 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
630 env->tr.limit < 103)
631 goto fail;
632 io_offset = lduw_kernel(env->tr.base + 0x66);
633 io_offset += (addr >> 3);
634 /* Note: the check needs two bytes */
635 if ((io_offset + 1) > env->tr.limit)
636 goto fail;
637 val = lduw_kernel(env->tr.base + io_offset);
638 val >>= (addr & 7);
639 mask = (1 << size) - 1;
640 /* all bits must be zero to allow the I/O */
641 if ((val & mask) != 0) {
642 fail:
643 raise_exception_err(EXCP0D_GPF, 0);
647 void helper_check_iob(uint32_t t0)
649 check_io(t0, 1);
652 void helper_check_iow(uint32_t t0)
654 check_io(t0, 2);
657 void helper_check_iol(uint32_t t0)
659 check_io(t0, 4);
662 void helper_outb(uint32_t port, uint32_t data)
664 cpu_outb(port, data & 0xff);
667 target_ulong helper_inb(uint32_t port)
669 return cpu_inb(port);
672 void helper_outw(uint32_t port, uint32_t data)
674 cpu_outw(port, data & 0xffff);
677 target_ulong helper_inw(uint32_t port)
679 return cpu_inw(port);
682 void helper_outl(uint32_t port, uint32_t data)
684 cpu_outl(port, data);
687 target_ulong helper_inl(uint32_t port)
689 return cpu_inl(port);
692 static inline unsigned int get_sp_mask(unsigned int e2)
694 if (e2 & DESC_B_MASK)
695 return 0xffffffff;
696 else
697 return 0xffff;
700 static int exeption_has_error_code(int intno)
702 switch(intno) {
703 case 8:
704 case 10:
705 case 11:
706 case 12:
707 case 13:
708 case 14:
709 case 17:
710 return 1;
712 return 0;
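/* Exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) are
   the ones defined to push an error code onto the stack. */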
715 #ifdef TARGET_X86_64
716 #define SET_ESP(val, sp_mask)\
717 do {\
718 if ((sp_mask) == 0xffff)\
719 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
720 else if ((sp_mask) == 0xffffffffLL)\
721 ESP = (uint32_t)(val);\
722 else\
723 ESP = (val);\
724 } while (0)
725 #else
726 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
727 #endif
729 /* on 64-bit machines this addition can overflow, so this segment addition
730 * macro can be used to trim the value to 32 bits whenever needed */
731 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
733 /* XXX: add a is_user flag to have proper security support */
734 #define PUSHW(ssp, sp, sp_mask, val)\
736 sp -= 2;\
737 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
740 #define PUSHL(ssp, sp, sp_mask, val)\
742 sp -= 4;\
743 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
746 #define POPW(ssp, sp, sp_mask, val)\
748 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
749 sp += 2;\
752 #define POPL(ssp, sp, sp_mask, val)\
754 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
755 sp += 4;\
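/* The PUSHW/PUSHL/POPW/POPL macros update only the local 'sp' copy
   passed to them; the caller commits the final value back to ESP
   (e.g. via SET_ESP()) once the whole frame has been pushed without
   faulting. */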
758 /* protected mode interrupt */
759 static void do_interrupt_protected(int intno, int is_int, int error_code,
760 unsigned int next_eip, int is_hw)
762 SegmentCache *dt;
763 target_ulong ptr, ssp;
764 int type, dpl, selector, ss_dpl, cpl;
765 int has_error_code, new_stack, shift;
766 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
767 uint32_t old_eip, sp_mask;
769 has_error_code = 0;
770 if (!is_int && !is_hw)
771 has_error_code = exeption_has_error_code(intno);
772 if (is_int)
773 old_eip = next_eip;
774 else
775 old_eip = env->eip;
777 dt = &env->idt;
778 if (intno * 8 + 7 > dt->limit)
779 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
780 ptr = dt->base + intno * 8;
781 e1 = ldl_kernel(ptr);
782 e2 = ldl_kernel(ptr + 4);
783 /* check gate type */
784 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
785 switch(type) {
786 case 5: /* task gate */
787 /* must do that check here to return the correct error code */
788 if (!(e2 & DESC_P_MASK))
789 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
790 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
791 if (has_error_code) {
792 int type;
793 uint32_t mask;
794 /* push the error code */
795 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
796 shift = type >> 3;
797 if (env->segs[R_SS].flags & DESC_B_MASK)
798 mask = 0xffffffff;
799 else
800 mask = 0xffff;
801 esp = (ESP - (2 << shift)) & mask;
802 ssp = env->segs[R_SS].base + esp;
803 if (shift)
804 stl_kernel(ssp, error_code);
805 else
806 stw_kernel(ssp, error_code);
807 SET_ESP(esp, mask);
809 return;
810 case 6: /* 286 interrupt gate */
811 case 7: /* 286 trap gate */
812 case 14: /* 386 interrupt gate */
813 case 15: /* 386 trap gate */
814 break;
815 default:
816 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
817 break;
819 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
820 cpl = env->hflags & HF_CPL_MASK;
821 /* check privilege if software int */
822 if (is_int && dpl < cpl)
823 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
824 /* check valid bit */
825 if (!(e2 & DESC_P_MASK))
826 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
827 selector = e1 >> 16;
828 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
829 if ((selector & 0xfffc) == 0)
830 raise_exception_err(EXCP0D_GPF, 0);
832 if (load_segment(&e1, &e2, selector) != 0)
833 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
834 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
835 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
836 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
837 if (dpl > cpl)
838 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
839 if (!(e2 & DESC_P_MASK))
840 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
841 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
842 /* to inner privilege */
843 get_ss_esp_from_tss(&ss, &esp, dpl);
844 if ((ss & 0xfffc) == 0)
845 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
846 if ((ss & 3) != dpl)
847 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
848 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
849 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
850 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
851 if (ss_dpl != dpl)
852 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
853 if (!(ss_e2 & DESC_S_MASK) ||
854 (ss_e2 & DESC_CS_MASK) ||
855 !(ss_e2 & DESC_W_MASK))
856 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
857 if (!(ss_e2 & DESC_P_MASK))
858 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
859 new_stack = 1;
860 sp_mask = get_sp_mask(ss_e2);
861 ssp = get_seg_base(ss_e1, ss_e2);
862 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
863 /* to same privilege */
864 if (env->eflags & VM_MASK)
865 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
866 new_stack = 0;
867 sp_mask = get_sp_mask(env->segs[R_SS].flags);
868 ssp = env->segs[R_SS].base;
869 esp = ESP;
870 dpl = cpl;
871 } else {
872 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
873 new_stack = 0; /* avoid warning */
874 sp_mask = 0; /* avoid warning */
875 ssp = 0; /* avoid warning */
876 esp = 0; /* avoid warning */
879 shift = type >> 3;
881 #if 0
882 /* XXX: check that enough room is available */
883 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
884 if (env->eflags & VM_MASK)
885 push_size += 8;
886 push_size <<= shift;
887 #endif
888 if (shift == 1) {
889 if (new_stack) {
890 if (env->eflags & VM_MASK) {
891 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
892 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
893 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
894 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
896 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
897 PUSHL(ssp, esp, sp_mask, ESP);
899 PUSHL(ssp, esp, sp_mask, compute_eflags());
900 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
901 PUSHL(ssp, esp, sp_mask, old_eip);
902 if (has_error_code) {
903 PUSHL(ssp, esp, sp_mask, error_code);
905 } else {
906 if (new_stack) {
907 if (env->eflags & VM_MASK) {
908 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
909 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
910 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
911 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
913 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
914 PUSHW(ssp, esp, sp_mask, ESP);
916 PUSHW(ssp, esp, sp_mask, compute_eflags());
917 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
918 PUSHW(ssp, esp, sp_mask, old_eip);
919 if (has_error_code) {
920 PUSHW(ssp, esp, sp_mask, error_code);
924 if (new_stack) {
925 if (env->eflags & VM_MASK) {
926 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
927 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
928 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
929 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
931 ss = (ss & ~3) | dpl;
932 cpu_x86_load_seg_cache(env, R_SS, ss,
933 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
935 SET_ESP(esp, sp_mask);
937 selector = (selector & ~3) | dpl;
938 cpu_x86_load_seg_cache(env, R_CS, selector,
939 get_seg_base(e1, e2),
940 get_seg_limit(e1, e2),
941 e2);
942 cpu_x86_set_cpl(env, dpl);
943 env->eip = offset;
945 /* an interrupt gate clears the IF flag; a trap gate does not */
946 if ((type & 1) == 0) {
947 env->eflags &= ~IF_MASK;
949 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
952 #ifdef TARGET_X86_64
954 #define PUSHQ(sp, val)\
956 sp -= 8;\
957 stq_kernel(sp, (val));\
960 #define POPQ(sp, val)\
962 val = ldq_kernel(sp);\
963 sp += 8;\
966 static inline target_ulong get_rsp_from_tss(int level)
968 int index;
970 #if 0
971 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
972 env->tr.base, env->tr.limit);
973 #endif
975 if (!(env->tr.flags & DESC_P_MASK))
976 cpu_abort(env, "invalid tss");
977 index = 8 * level + 4;
978 if ((index + 7) > env->tr.limit)
979 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
980 return ldq_kernel(env->tr.base + index);
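/* In the 64-bit TSS, RSP0-RSP2 are stored at offsets 4, 12 and 20 and
   IST1-IST7 at offsets 36..84, so a single index of 8 * level + 4
   covers both the per-CPL stacks (level = dpl) and the IST entries
   (level = ist + 3), as used by do_interrupt64() below. */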
983 /* 64 bit interrupt */
984 static void do_interrupt64(int intno, int is_int, int error_code,
985 target_ulong next_eip, int is_hw)
987 SegmentCache *dt;
988 target_ulong ptr;
989 int type, dpl, selector, cpl, ist;
990 int has_error_code, new_stack;
991 uint32_t e1, e2, e3, ss;
992 target_ulong old_eip, esp, offset;
994 has_error_code = 0;
995 if (!is_int && !is_hw)
996 has_error_code = exeption_has_error_code(intno);
997 if (is_int)
998 old_eip = next_eip;
999 else
1000 old_eip = env->eip;
1002 dt = &env->idt;
1003 if (intno * 16 + 15 > dt->limit)
1004 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1005 ptr = dt->base + intno * 16;
1006 e1 = ldl_kernel(ptr);
1007 e2 = ldl_kernel(ptr + 4);
1008 e3 = ldl_kernel(ptr + 8);
1009 /* check gate type */
1010 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1011 switch(type) {
1012 case 14: /* 386 interrupt gate */
1013 case 15: /* 386 trap gate */
1014 break;
1015 default:
1016 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1017 break;
1019 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1020 cpl = env->hflags & HF_CPL_MASK;
1021 /* check privilege if software int */
1022 if (is_int && dpl < cpl)
1023 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
1024 /* check valid bit */
1025 if (!(e2 & DESC_P_MASK))
1026 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
1027 selector = e1 >> 16;
1028 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1029 ist = e2 & 7;
1030 if ((selector & 0xfffc) == 0)
1031 raise_exception_err(EXCP0D_GPF, 0);
1033 if (load_segment(&e1, &e2, selector) != 0)
1034 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1035 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1036 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1037 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1038 if (dpl > cpl)
1039 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1040 if (!(e2 & DESC_P_MASK))
1041 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1042 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
1043 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1044 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
1045 /* to inner privilege */
1046 if (ist != 0)
1047 esp = get_rsp_from_tss(ist + 3);
1048 else
1049 esp = get_rsp_from_tss(dpl);
1050 esp &= ~0xfLL; /* align stack */
1051 ss = 0;
1052 new_stack = 1;
1053 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
1054 /* to same privilege */
1055 if (env->eflags & VM_MASK)
1056 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1057 new_stack = 0;
1058 if (ist != 0)
1059 esp = get_rsp_from_tss(ist + 3);
1060 else
1061 esp = ESP;
1062 esp &= ~0xfLL; /* align stack */
1063 dpl = cpl;
1064 } else {
1065 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1066 new_stack = 0; /* avoid warning */
1067 esp = 0; /* avoid warning */
1070 PUSHQ(esp, env->segs[R_SS].selector);
1071 PUSHQ(esp, ESP);
1072 PUSHQ(esp, compute_eflags());
1073 PUSHQ(esp, env->segs[R_CS].selector);
1074 PUSHQ(esp, old_eip);
1075 if (has_error_code) {
1076 PUSHQ(esp, error_code);
1079 if (new_stack) {
1080 ss = 0 | dpl;
1081 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
1083 ESP = esp;
1085 selector = (selector & ~3) | dpl;
1086 cpu_x86_load_seg_cache(env, R_CS, selector,
1087 get_seg_base(e1, e2),
1088 get_seg_limit(e1, e2),
1089 e2);
1090 cpu_x86_set_cpl(env, dpl);
1091 env->eip = offset;
1093 /* an interrupt gate clears the IF flag; a trap gate does not */
1094 if ((type & 1) == 0) {
1095 env->eflags &= ~IF_MASK;
1097 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1099 #endif
1101 #ifdef TARGET_X86_64
1102 #if defined(CONFIG_USER_ONLY)
1103 void helper_syscall(int next_eip_addend)
1105 env->exception_index = EXCP_SYSCALL;
1106 env->exception_next_eip = env->eip + next_eip_addend;
1107 cpu_loop_exit(env);
1109 #else
1110 void helper_syscall(int next_eip_addend)
1112 int selector;
1114 if (!(env->efer & MSR_EFER_SCE)) {
1115 raise_exception_err(EXCP06_ILLOP, 0);
1117 selector = (env->star >> 32) & 0xffff;
1118 if (env->hflags & HF_LMA_MASK) {
1119 int code64;
1121 ECX = env->eip + next_eip_addend;
1122 env->regs[11] = compute_eflags();
1124 code64 = env->hflags & HF_CS64_MASK;
1126 cpu_x86_set_cpl(env, 0);
1127 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1128 0, 0xffffffff,
1129 DESC_G_MASK | DESC_P_MASK |
1130 DESC_S_MASK |
1131 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1132 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1133 0, 0xffffffff,
1134 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1135 DESC_S_MASK |
1136 DESC_W_MASK | DESC_A_MASK);
1137 env->eflags &= ~env->fmask;
1138 load_eflags(env->eflags, 0);
1139 if (code64)
1140 env->eip = env->lstar;
1141 else
1142 env->eip = env->cstar;
1143 } else {
1144 ECX = (uint32_t)(env->eip + next_eip_addend);
1146 cpu_x86_set_cpl(env, 0);
1147 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1148 0, 0xffffffff,
1149 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1150 DESC_S_MASK |
1151 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1152 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1153 0, 0xffffffff,
1154 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1155 DESC_S_MASK |
1156 DESC_W_MASK | DESC_A_MASK);
1157 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1158 env->eip = (uint32_t)env->star;
1161 #endif
1162 #endif
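/* For SYSCALL the kernel code selector comes from STAR[47:32]; in long
   mode the target RIP is LSTAR (or CSTAR for compatibility-mode
   callers) and the RFLAGS bits listed in FMASK are cleared, while in
   legacy mode the target EIP is STAR[31:0], matching the MSR accesses
   in helper_syscall() above. */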
1164 #ifdef TARGET_X86_64
1165 void helper_sysret(int dflag)
1167 int cpl, selector;
1169 if (!(env->efer & MSR_EFER_SCE)) {
1170 raise_exception_err(EXCP06_ILLOP, 0);
1172 cpl = env->hflags & HF_CPL_MASK;
1173 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1174 raise_exception_err(EXCP0D_GPF, 0);
1176 selector = (env->star >> 48) & 0xffff;
1177 if (env->hflags & HF_LMA_MASK) {
1178 if (dflag == 2) {
1179 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1180 0, 0xffffffff,
1181 DESC_G_MASK | DESC_P_MASK |
1182 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1183 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1184 DESC_L_MASK);
1185 env->eip = ECX;
1186 } else {
1187 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1188 0, 0xffffffff,
1189 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1190 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1191 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1192 env->eip = (uint32_t)ECX;
1194 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1195 0, 0xffffffff,
1196 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1197 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1198 DESC_W_MASK | DESC_A_MASK);
1199 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1200 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1201 cpu_x86_set_cpl(env, 3);
1202 } else {
1203 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1204 0, 0xffffffff,
1205 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1206 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1207 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1208 env->eip = (uint32_t)ECX;
1209 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1210 0, 0xffffffff,
1211 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1212 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1213 DESC_W_MASK | DESC_A_MASK);
1214 env->eflags |= IF_MASK;
1215 cpu_x86_set_cpl(env, 3);
1218 #endif
1220 /* real mode interrupt */
1221 static void do_interrupt_real(int intno, int is_int, int error_code,
1222 unsigned int next_eip)
1224 SegmentCache *dt;
1225 target_ulong ptr, ssp;
1226 int selector;
1227 uint32_t offset, esp;
1228 uint32_t old_cs, old_eip;
1230 /* real mode (simpler !) */
1231 dt = &env->idt;
1232 if (intno * 4 + 3 > dt->limit)
1233 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1234 ptr = dt->base + intno * 4;
1235 offset = lduw_kernel(ptr);
1236 selector = lduw_kernel(ptr + 2);
1237 esp = ESP;
1238 ssp = env->segs[R_SS].base;
1239 if (is_int)
1240 old_eip = next_eip;
1241 else
1242 old_eip = env->eip;
1243 old_cs = env->segs[R_CS].selector;
1244 /* XXX: use SS segment size ? */
1245 PUSHW(ssp, esp, 0xffff, compute_eflags());
1246 PUSHW(ssp, esp, 0xffff, old_cs);
1247 PUSHW(ssp, esp, 0xffff, old_eip);
1249 /* update processor state */
1250 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1251 env->eip = offset;
1252 env->segs[R_CS].selector = selector;
1253 env->segs[R_CS].base = (selector << 4);
1254 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1257 #if defined(CONFIG_USER_ONLY)
1258 /* fake user mode interrupt */
1259 static void do_interrupt_user(int intno, int is_int, int error_code,
1260 target_ulong next_eip)
1262 SegmentCache *dt;
1263 target_ulong ptr;
1264 int dpl, cpl, shift;
1265 uint32_t e2;
1267 dt = &env->idt;
1268 if (env->hflags & HF_LMA_MASK) {
1269 shift = 4;
1270 } else {
1271 shift = 3;
1273 ptr = dt->base + (intno << shift);
1274 e2 = ldl_kernel(ptr + 4);
1276 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1277 cpl = env->hflags & HF_CPL_MASK;
1278 /* check privilege if software int */
1279 if (is_int && dpl < cpl)
1280 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1282 /* Since we emulate only user space, we cannot do more than
1283 exit the emulation with the appropriate exception and error
1284 code */
1285 if (is_int)
1286 EIP = next_eip;
1289 #else
1291 static void handle_even_inj(int intno, int is_int, int error_code,
1292 int is_hw, int rm)
1294 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1295 if (!(event_inj & SVM_EVTINJ_VALID)) {
1296 int type;
1297 if (is_int)
1298 type = SVM_EVTINJ_TYPE_SOFT;
1299 else
1300 type = SVM_EVTINJ_TYPE_EXEPT;
1301 event_inj = intno | type | SVM_EVTINJ_VALID;
1302 if (!rm && exeption_has_error_code(intno)) {
1303 event_inj |= SVM_EVTINJ_VALID_ERR;
1304 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1306 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1309 #endif
1312 * Begin execution of an interrupt. is_int is TRUE if coming from
1313 * the int instruction. next_eip is the EIP value AFTER the interrupt
1314 * instruction. It is only relevant if is_int is TRUE.
1316 static void do_interrupt_all(int intno, int is_int, int error_code,
1317 target_ulong next_eip, int is_hw)
1319 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1320 if ((env->cr[0] & CR0_PE_MASK)) {
1321 static int count;
1322 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1323 count, intno, error_code, is_int,
1324 env->hflags & HF_CPL_MASK,
1325 env->segs[R_CS].selector, EIP,
1326 (int)env->segs[R_CS].base + EIP,
1327 env->segs[R_SS].selector, ESP);
1328 if (intno == 0x0e) {
1329 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1330 } else {
1331 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1333 qemu_log("\n");
1334 log_cpu_state(env, X86_DUMP_CCOP);
1335 #if 0
1337 int i;
1338 target_ulong ptr;
1339 qemu_log(" code=");
1340 ptr = env->segs[R_CS].base + env->eip;
1341 for(i = 0; i < 16; i++) {
1342 qemu_log(" %02x", ldub(ptr + i));
1344 qemu_log("\n");
1346 #endif
1347 count++;
1350 if (env->cr[0] & CR0_PE_MASK) {
1351 #if !defined(CONFIG_USER_ONLY)
1352 if (env->hflags & HF_SVMI_MASK)
1353 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1354 #endif
1355 #ifdef TARGET_X86_64
1356 if (env->hflags & HF_LMA_MASK) {
1357 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1358 } else
1359 #endif
1361 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1363 } else {
1364 #if !defined(CONFIG_USER_ONLY)
1365 if (env->hflags & HF_SVMI_MASK)
1366 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1367 #endif
1368 do_interrupt_real(intno, is_int, error_code, next_eip);
1371 #if !defined(CONFIG_USER_ONLY)
1372 if (env->hflags & HF_SVMI_MASK) {
1373 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1374 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1376 #endif
1379 void do_interrupt(CPUX86State *env1)
1381 CPUX86State *saved_env;
1383 saved_env = env;
1384 env = env1;
1385 #if defined(CONFIG_USER_ONLY)
1386 /* if user mode only, we simulate a fake exception
1387 which will be handled outside the cpu execution
1388 loop */
1389 do_interrupt_user(env->exception_index,
1390 env->exception_is_int,
1391 env->error_code,
1392 env->exception_next_eip);
1393 /* successfully delivered */
1394 env->old_exception = -1;
1395 #else
1396 /* simulate a real cpu exception. On i386, it can
1397 trigger new exceptions, but we do not handle
1398 double or triple faults yet. */
1399 do_interrupt_all(env->exception_index,
1400 env->exception_is_int,
1401 env->error_code,
1402 env->exception_next_eip, 0);
1403 /* successfully delivered */
1404 env->old_exception = -1;
1405 #endif
1406 env = saved_env;
1409 void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
1411 CPUX86State *saved_env;
1413 saved_env = env;
1414 env = env1;
1415 do_interrupt_all(intno, 0, 0, 0, is_hw);
1416 env = saved_env;
1419 /* This should come from sysemu.h - if we could include it here... */
1420 void qemu_system_reset_request(void);
1423 * Check nested exceptions and change to double or triple fault if
1424 * needed. It should only be called if this is not an interrupt.
1425 * Returns the new exception number.
1427 static int check_exception(int intno, int *error_code)
1429 int first_contributory = env->old_exception == 0 ||
1430 (env->old_exception >= 10 &&
1431 env->old_exception <= 13);
1432 int second_contributory = intno == 0 ||
1433 (intno >= 10 && intno <= 13);
1435 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1436 env->old_exception, intno);
1438 #if !defined(CONFIG_USER_ONLY)
1439 if (env->old_exception == EXCP08_DBLE) {
1440 if (env->hflags & HF_SVMI_MASK)
1441 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1443 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1445 qemu_system_reset_request();
1446 return EXCP_HLT;
1448 #endif
1450 if ((first_contributory && second_contributory)
1451 || (env->old_exception == EXCP0E_PAGE &&
1452 (second_contributory || (intno == EXCP0E_PAGE)))) {
1453 intno = EXCP08_DBLE;
1454 *error_code = 0;
1457 if (second_contributory || (intno == EXCP0E_PAGE) ||
1458 (intno == EXCP08_DBLE))
1459 env->old_exception = intno;
1461 return intno;
1465 * Signal an interrupt. It is executed in the main CPU loop.
1466 * is_int is TRUE if coming from the int instruction. next_eip is the
1467 * EIP value AFTER the interrupt instruction. It is only relevant if
1468 * is_int is TRUE.
1470 static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1471 int next_eip_addend)
1473 if (!is_int) {
1474 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1475 intno = check_exception(intno, &error_code);
1476 } else {
1477 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1480 env->exception_index = intno;
1481 env->error_code = error_code;
1482 env->exception_is_int = is_int;
1483 env->exception_next_eip = env->eip + next_eip_addend;
1484 cpu_loop_exit(env);
1487 /* shortcuts to generate exceptions */
1489 static void QEMU_NORETURN raise_exception_err(int exception_index,
1490 int error_code)
1492 raise_interrupt(exception_index, 0, error_code, 0);
1495 void raise_exception_err_env(CPUX86State *nenv, int exception_index,
1496 int error_code)
1498 env = nenv;
1499 raise_interrupt(exception_index, 0, error_code, 0);
1502 static void QEMU_NORETURN raise_exception(int exception_index)
1504 raise_interrupt(exception_index, 0, 0, 0);
1507 void raise_exception_env(int exception_index, CPUX86State *nenv)
1509 env = nenv;
1510 raise_exception(exception_index);
1512 /* SMM support */
1514 #if defined(CONFIG_USER_ONLY)
1516 void do_smm_enter(CPUX86State *env1)
1520 void helper_rsm(void)
1524 #else
1526 #ifdef TARGET_X86_64
1527 #define SMM_REVISION_ID 0x00020064
1528 #else
1529 #define SMM_REVISION_ID 0x00020000
1530 #endif
1532 void do_smm_enter(CPUX86State *env1)
1534 target_ulong sm_state;
1535 SegmentCache *dt;
1536 int i, offset;
1537 CPUX86State *saved_env;
1539 saved_env = env;
1540 env = env1;
1542 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1543 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1545 env->hflags |= HF_SMM_MASK;
1546 cpu_smm_update(env);
1548 sm_state = env->smbase + 0x8000;
1550 #ifdef TARGET_X86_64
1551 for(i = 0; i < 6; i++) {
1552 dt = &env->segs[i];
1553 offset = 0x7e00 + i * 16;
1554 stw_phys(sm_state + offset, dt->selector);
1555 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1556 stl_phys(sm_state + offset + 4, dt->limit);
1557 stq_phys(sm_state + offset + 8, dt->base);
1560 stq_phys(sm_state + 0x7e68, env->gdt.base);
1561 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1563 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1564 stq_phys(sm_state + 0x7e78, env->ldt.base);
1565 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1566 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1568 stq_phys(sm_state + 0x7e88, env->idt.base);
1569 stl_phys(sm_state + 0x7e84, env->idt.limit);
1571 stw_phys(sm_state + 0x7e90, env->tr.selector);
1572 stq_phys(sm_state + 0x7e98, env->tr.base);
1573 stl_phys(sm_state + 0x7e94, env->tr.limit);
1574 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1576 stq_phys(sm_state + 0x7ed0, env->efer);
1578 stq_phys(sm_state + 0x7ff8, EAX);
1579 stq_phys(sm_state + 0x7ff0, ECX);
1580 stq_phys(sm_state + 0x7fe8, EDX);
1581 stq_phys(sm_state + 0x7fe0, EBX);
1582 stq_phys(sm_state + 0x7fd8, ESP);
1583 stq_phys(sm_state + 0x7fd0, EBP);
1584 stq_phys(sm_state + 0x7fc8, ESI);
1585 stq_phys(sm_state + 0x7fc0, EDI);
1586 for(i = 8; i < 16; i++)
1587 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1588 stq_phys(sm_state + 0x7f78, env->eip);
1589 stl_phys(sm_state + 0x7f70, compute_eflags());
1590 stl_phys(sm_state + 0x7f68, env->dr[6]);
1591 stl_phys(sm_state + 0x7f60, env->dr[7]);
1593 stl_phys(sm_state + 0x7f48, env->cr[4]);
1594 stl_phys(sm_state + 0x7f50, env->cr[3]);
1595 stl_phys(sm_state + 0x7f58, env->cr[0]);
1597 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1598 stl_phys(sm_state + 0x7f00, env->smbase);
1599 #else
1600 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1601 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1602 stl_phys(sm_state + 0x7ff4, compute_eflags());
1603 stl_phys(sm_state + 0x7ff0, env->eip);
1604 stl_phys(sm_state + 0x7fec, EDI);
1605 stl_phys(sm_state + 0x7fe8, ESI);
1606 stl_phys(sm_state + 0x7fe4, EBP);
1607 stl_phys(sm_state + 0x7fe0, ESP);
1608 stl_phys(sm_state + 0x7fdc, EBX);
1609 stl_phys(sm_state + 0x7fd8, EDX);
1610 stl_phys(sm_state + 0x7fd4, ECX);
1611 stl_phys(sm_state + 0x7fd0, EAX);
1612 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1613 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1615 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1616 stl_phys(sm_state + 0x7f64, env->tr.base);
1617 stl_phys(sm_state + 0x7f60, env->tr.limit);
1618 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1620 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1621 stl_phys(sm_state + 0x7f80, env->ldt.base);
1622 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1623 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1625 stl_phys(sm_state + 0x7f74, env->gdt.base);
1626 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1628 stl_phys(sm_state + 0x7f58, env->idt.base);
1629 stl_phys(sm_state + 0x7f54, env->idt.limit);
1631 for(i = 0; i < 6; i++) {
1632 dt = &env->segs[i];
1633 if (i < 3)
1634 offset = 0x7f84 + i * 12;
1635 else
1636 offset = 0x7f2c + (i - 3) * 12;
1637 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1638 stl_phys(sm_state + offset + 8, dt->base);
1639 stl_phys(sm_state + offset + 4, dt->limit);
1640 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1642 stl_phys(sm_state + 0x7f14, env->cr[4]);
1644 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1645 stl_phys(sm_state + 0x7ef8, env->smbase);
1646 #endif
1647 /* init SMM cpu state */
1649 #ifdef TARGET_X86_64
1650 cpu_load_efer(env, 0);
1651 #endif
1652 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1653 env->eip = 0x00008000;
1654 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1655 0xffffffff, 0);
1656 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1657 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1658 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1659 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1660 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1662 cpu_x86_update_cr0(env,
1663 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1664 cpu_x86_update_cr4(env, 0);
1665 env->dr[7] = 0x00000400;
1666 CC_OP = CC_OP_EFLAGS;
1667 env = saved_env;
1670 void helper_rsm(void)
1672 target_ulong sm_state;
1673 int i, offset;
1674 uint32_t val;
1676 sm_state = env->smbase + 0x8000;
1677 #ifdef TARGET_X86_64
1678 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1680 for(i = 0; i < 6; i++) {
1681 offset = 0x7e00 + i * 16;
1682 cpu_x86_load_seg_cache(env, i,
1683 lduw_phys(sm_state + offset),
1684 ldq_phys(sm_state + offset + 8),
1685 ldl_phys(sm_state + offset + 4),
1686 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1689 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1690 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1692 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1693 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1694 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1695 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1697 env->idt.base = ldq_phys(sm_state + 0x7e88);
1698 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1700 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1701 env->tr.base = ldq_phys(sm_state + 0x7e98);
1702 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1703 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1705 EAX = ldq_phys(sm_state + 0x7ff8);
1706 ECX = ldq_phys(sm_state + 0x7ff0);
1707 EDX = ldq_phys(sm_state + 0x7fe8);
1708 EBX = ldq_phys(sm_state + 0x7fe0);
1709 ESP = ldq_phys(sm_state + 0x7fd8);
1710 EBP = ldq_phys(sm_state + 0x7fd0);
1711 ESI = ldq_phys(sm_state + 0x7fc8);
1712 EDI = ldq_phys(sm_state + 0x7fc0);
1713 for(i = 8; i < 16; i++)
1714 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1715 env->eip = ldq_phys(sm_state + 0x7f78);
1716 load_eflags(ldl_phys(sm_state + 0x7f70),
1717 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1718 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1719 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1721 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1722 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1723 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1725 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1726 if (val & 0x20000) {
1727 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1729 #else
1730 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1731 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1732 load_eflags(ldl_phys(sm_state + 0x7ff4),
1733 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1734 env->eip = ldl_phys(sm_state + 0x7ff0);
1735 EDI = ldl_phys(sm_state + 0x7fec);
1736 ESI = ldl_phys(sm_state + 0x7fe8);
1737 EBP = ldl_phys(sm_state + 0x7fe4);
1738 ESP = ldl_phys(sm_state + 0x7fe0);
1739 EBX = ldl_phys(sm_state + 0x7fdc);
1740 EDX = ldl_phys(sm_state + 0x7fd8);
1741 ECX = ldl_phys(sm_state + 0x7fd4);
1742 EAX = ldl_phys(sm_state + 0x7fd0);
1743 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1744 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1746 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1747 env->tr.base = ldl_phys(sm_state + 0x7f64);
1748 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1749 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1751 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1752 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1753 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1754 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1756 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1757 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1759 env->idt.base = ldl_phys(sm_state + 0x7f58);
1760 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1762 for(i = 0; i < 6; i++) {
1763 if (i < 3)
1764 offset = 0x7f84 + i * 12;
1765 else
1766 offset = 0x7f2c + (i - 3) * 12;
1767 cpu_x86_load_seg_cache(env, i,
1768 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1769 ldl_phys(sm_state + offset + 8),
1770 ldl_phys(sm_state + offset + 4),
1771 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1773 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1775 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1776 if (val & 0x20000) {
1777 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1779 #endif
1780 CC_OP = CC_OP_EFLAGS;
1781 env->hflags &= ~HF_SMM_MASK;
1782 cpu_smm_update(env);
1784 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1785 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1788 #endif /* !CONFIG_USER_ONLY */
1791 /* division, flags are undefined */
1793 void helper_divb_AL(target_ulong t0)
1795 unsigned int num, den, q, r;
1797 num = (EAX & 0xffff);
1798 den = (t0 & 0xff);
1799 if (den == 0) {
1800 raise_exception(EXCP00_DIVZ);
1802 q = (num / den);
1803 if (q > 0xff)
1804 raise_exception(EXCP00_DIVZ);
1805 q &= 0xff;
1806 r = (num % den) & 0xff;
1807 EAX = (EAX & ~0xffff) | (r << 8) | q;
1810 void helper_idivb_AL(target_ulong t0)
1812 int num, den, q, r;
1814 num = (int16_t)EAX;
1815 den = (int8_t)t0;
1816 if (den == 0) {
1817 raise_exception(EXCP00_DIVZ);
1819 q = (num / den);
1820 if (q != (int8_t)q)
1821 raise_exception(EXCP00_DIVZ);
1822 q &= 0xff;
1823 r = (num % den) & 0xff;
1824 EAX = (EAX & ~0xffff) | (r << 8) | q;
1827 void helper_divw_AX(target_ulong t0)
1829 unsigned int num, den, q, r;
1831 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1832 den = (t0 & 0xffff);
1833 if (den == 0) {
1834 raise_exception(EXCP00_DIVZ);
1836 q = (num / den);
1837 if (q > 0xffff)
1838 raise_exception(EXCP00_DIVZ);
1839 q &= 0xffff;
1840 r = (num % den) & 0xffff;
1841 EAX = (EAX & ~0xffff) | q;
1842 EDX = (EDX & ~0xffff) | r;
1845 void helper_idivw_AX(target_ulong t0)
1847 int num, den, q, r;
1849 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1850 den = (int16_t)t0;
1851 if (den == 0) {
1852 raise_exception(EXCP00_DIVZ);
1854 q = (num / den);
1855 if (q != (int16_t)q)
1856 raise_exception(EXCP00_DIVZ);
1857 q &= 0xffff;
1858 r = (num % den) & 0xffff;
1859 EAX = (EAX & ~0xffff) | q;
1860 EDX = (EDX & ~0xffff) | r;
1863 void helper_divl_EAX(target_ulong t0)
1865 unsigned int den, r;
1866 uint64_t num, q;
1868 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1869 den = t0;
1870 if (den == 0) {
1871 raise_exception(EXCP00_DIVZ);
1873 q = (num / den);
1874 r = (num % den);
1875 if (q > 0xffffffff)
1876 raise_exception(EXCP00_DIVZ);
1877 EAX = (uint32_t)q;
1878 EDX = (uint32_t)r;
1881 void helper_idivl_EAX(target_ulong t0)
1883 int den, r;
1884 int64_t num, q;
1886 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1887 den = t0;
1888 if (den == 0) {
1889 raise_exception(EXCP00_DIVZ);
1891 q = (num / den);
1892 r = (num % den);
1893 if (q != (int32_t)q)
1894 raise_exception(EXCP00_DIVZ);
1895 EAX = (uint32_t)q;
1896 EDX = (uint32_t)r;
1899 /* bcd */
1901 /* XXX: exception */
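/* (Presumably the missing check: AAM with an immediate of 0 raises #DE
   on real hardware, which helper_aam below does not emulate.) */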
1902 void helper_aam(int base)
1904 int al, ah;
1905 al = EAX & 0xff;
1906 ah = al / base;
1907 al = al % base;
1908 EAX = (EAX & ~0xffff) | al | (ah << 8);
1909 CC_DST = al;
1912 void helper_aad(int base)
1914 int al, ah;
1915 al = EAX & 0xff;
1916 ah = (EAX >> 8) & 0xff;
1917 al = ((ah * base) + al) & 0xff;
1918 EAX = (EAX & ~0xffff) | al;
1919 CC_DST = al;
1922 void helper_aaa(void)
1924 int icarry;
1925 int al, ah, af;
1926 int eflags;
1928 eflags = helper_cc_compute_all(CC_OP);
1929 af = eflags & CC_A;
1930 al = EAX & 0xff;
1931 ah = (EAX >> 8) & 0xff;
1933 icarry = (al > 0xf9);
1934 if (((al & 0x0f) > 9 ) || af) {
1935 al = (al + 6) & 0x0f;
1936 ah = (ah + 1 + icarry) & 0xff;
1937 eflags |= CC_C | CC_A;
1938 } else {
1939 eflags &= ~(CC_C | CC_A);
1940 al &= 0x0f;
1942 EAX = (EAX & ~0xffff) | al | (ah << 8);
1943 CC_SRC = eflags;
1946 void helper_aas(void)
1948 int icarry;
1949 int al, ah, af;
1950 int eflags;
1952 eflags = helper_cc_compute_all(CC_OP);
1953 af = eflags & CC_A;
1954 al = EAX & 0xff;
1955 ah = (EAX >> 8) & 0xff;
1957 icarry = (al < 6);
1958 if (((al & 0x0f) > 9 ) || af) {
1959 al = (al - 6) & 0x0f;
1960 ah = (ah - 1 - icarry) & 0xff;
1961 eflags |= CC_C | CC_A;
1962 } else {
1963 eflags &= ~(CC_C | CC_A);
1964 al &= 0x0f;
1966 EAX = (EAX & ~0xffff) | al | (ah << 8);
1967 CC_SRC = eflags;
1970 void helper_daa(void)
1972 int old_al, al, af, cf;
1973 int eflags;
1975 eflags = helper_cc_compute_all(CC_OP);
1976 cf = eflags & CC_C;
1977 af = eflags & CC_A;
1978 old_al = al = EAX & 0xff;
1980 eflags = 0;
1981 if (((al & 0x0f) > 9 ) || af) {
1982 al = (al + 6) & 0xff;
1983 eflags |= CC_A;
1985 if ((old_al > 0x99) || cf) {
1986 al = (al + 0x60) & 0xff;
1987 eflags |= CC_C;
1989 EAX = (EAX & ~0xff) | al;
1990 /* well, speed is not an issue here, so we compute the flags by hand */
1991 eflags |= (al == 0) << 6; /* zf */
1992 eflags |= parity_table[al]; /* pf */
1993 eflags |= (al & 0x80); /* sf */
1994 CC_SRC = eflags;
1997 void helper_das(void)
1999 int al, al1, af, cf;
2000 int eflags;
2002 eflags = helper_cc_compute_all(CC_OP);
2003 cf = eflags & CC_C;
2004 af = eflags & CC_A;
2005 al = EAX & 0xff;
2007 eflags = 0;
2008 al1 = al;
2009 if (((al & 0x0f) > 9) || af) {
2010 eflags |= CC_A;
2011 if (al < 6 || cf)
2012 eflags |= CC_C;
2013 al = (al - 6) & 0xff;
2015 if ((al1 > 0x99) || cf) {
2016 al = (al - 0x60) & 0xff;
2017 eflags |= CC_C;
2019 EAX = (EAX & ~0xff) | al;
2020 /* well, speed is not an issue here, so we compute the flags by hand */
2021 eflags |= (al == 0) << 6; /* zf */
2022 eflags |= parity_table[al]; /* pf */
2023 eflags |= (al & 0x80); /* sf */
2024 CC_SRC = eflags;
2027 void helper_into(int next_eip_addend)
2029 int eflags;
2030 eflags = helper_cc_compute_all(CC_OP);
2031 if (eflags & CC_O) {
2032 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
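/* CMPXCHG8B: compare the 64-bit memory operand with EDX:EAX.  On a
   match, ECX:EBX is stored and ZF is set; on a mismatch the memory
   value is loaded into EDX:EAX and ZF is cleared, while the old value
   is written back so the memory access is always a store. */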
2036 void helper_cmpxchg8b(target_ulong a0)
2038 uint64_t d;
2039 int eflags;
2041 eflags = helper_cc_compute_all(CC_OP);
2042 d = ldq(a0);
2043 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2044 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2045 eflags |= CC_Z;
2046 } else {
2047 /* always do the store */
2048 stq(a0, d);
2049 EDX = (uint32_t)(d >> 32);
2050 EAX = (uint32_t)d;
2051 eflags &= ~CC_Z;
2053 CC_SRC = eflags;
2056 #ifdef TARGET_X86_64
2057 void helper_cmpxchg16b(target_ulong a0)
2059 uint64_t d0, d1;
2060 int eflags;
2062 if ((a0 & 0xf) != 0)
2063 raise_exception(EXCP0D_GPF);
2064 eflags = helper_cc_compute_all(CC_OP);
2065 d0 = ldq(a0);
2066 d1 = ldq(a0 + 8);
2067 if (d0 == EAX && d1 == EDX) {
2068 stq(a0, EBX);
2069 stq(a0 + 8, ECX);
2070 eflags |= CC_Z;
2071 } else {
2072 /* always do the store */
2073 stq(a0, d0);
2074 stq(a0 + 8, d1);
2075 EDX = d1;
2076 EAX = d0;
2077 eflags &= ~CC_Z;
2079 CC_SRC = eflags;
2081 #endif
2083 void helper_single_step(void)
2085 #ifndef CONFIG_USER_ONLY
2086 check_hw_breakpoints(env, 1);
2087 env->dr[6] |= DR6_BS;
2088 #endif
2089 raise_exception(EXCP01_DB);
2092 void helper_cpuid(void)
2094 uint32_t eax, ebx, ecx, edx;
2096 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2098 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2099 EAX = eax;
2100 EBX = ebx;
2101 ECX = ecx;
2102 EDX = edx;
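/* ENTER with a non-zero nesting level: copies the saved frame pointers
   of the enclosing frames onto the new stack frame and finally stores
   the new frame pointer (t1), using 16-bit or 32-bit stack slots. */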
2105 void helper_enter_level(int level, int data32, target_ulong t1)
2107 target_ulong ssp;
2108 uint32_t esp_mask, esp, ebp;
2110 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2111 ssp = env->segs[R_SS].base;
2112 ebp = EBP;
2113 esp = ESP;
2114 if (data32) {
2115 /* 32 bit */
2116 esp -= 4;
2117 while (--level) {
2118 esp -= 4;
2119 ebp -= 4;
2120 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2122 esp -= 4;
2123 stl(ssp + (esp & esp_mask), t1);
2124 } else {
2125 /* 16 bit */
2126 esp -= 2;
2127 while (--level) {
2128 esp -= 2;
2129 ebp -= 2;
2130 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2132 esp -= 2;
2133 stw(ssp + (esp & esp_mask), t1);
2137 #ifdef TARGET_X86_64
2138 void helper_enter64_level(int level, int data64, target_ulong t1)
2140 target_ulong esp, ebp;
2141 ebp = EBP;
2142 esp = ESP;
2144 if (data64) {
2145 /* 64 bit */
2146 esp -= 8;
2147 while (--level) {
2148 esp -= 8;
2149 ebp -= 8;
2150 stq(esp, ldq(ebp));
2152 esp -= 8;
2153 stq(esp, t1);
2154 } else {
2155 /* 16 bit */
2156 esp -= 2;
2157 while (--level) {
2158 esp -= 2;
2159 ebp -= 2;
2160 stw(esp, lduw(ebp));
2162 esp -= 2;
2163 stw(esp, t1);
2166 #endif
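/* LLDT: load the LDT register from a GDT descriptor.  A null selector
   simply clears the cached base and limit; otherwise the descriptor
   must be a present LDT system descriptor (type 2), with a 16-byte
   entry in long mode carrying the upper 32 bits of the base. */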
2168 void helper_lldt(int selector)
2170 SegmentCache *dt;
2171 uint32_t e1, e2;
2172 int index, entry_limit;
2173 target_ulong ptr;
2175 selector &= 0xffff;
2176 if ((selector & 0xfffc) == 0) {
2177 /* XXX: NULL selector case: invalid LDT */
2178 env->ldt.base = 0;
2179 env->ldt.limit = 0;
2180 } else {
2181 if (selector & 0x4)
2182 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2183 dt = &env->gdt;
2184 index = selector & ~7;
2185 #ifdef TARGET_X86_64
2186 if (env->hflags & HF_LMA_MASK)
2187 entry_limit = 15;
2188 else
2189 #endif
2190 entry_limit = 7;
2191 if ((index + entry_limit) > dt->limit)
2192 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2193 ptr = dt->base + index;
2194 e1 = ldl_kernel(ptr);
2195 e2 = ldl_kernel(ptr + 4);
2196 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2197 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2198 if (!(e2 & DESC_P_MASK))
2199 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2200 #ifdef TARGET_X86_64
2201 if (env->hflags & HF_LMA_MASK) {
2202 uint32_t e3;
2203 e3 = ldl_kernel(ptr + 8);
2204 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2205 env->ldt.base |= (target_ulong)e3 << 32;
2206 } else
2207 #endif
2209 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2212 env->ldt.selector = selector;
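/* LTR: load the task register.  The selector must reference a present,
   available TSS descriptor (type 1 or 9) in the GDT; the descriptor is
   then marked busy in memory. */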
2215 void helper_ltr(int selector)
2217 SegmentCache *dt;
2218 uint32_t e1, e2;
2219 int index, type, entry_limit;
2220 target_ulong ptr;
2222 selector &= 0xffff;
2223 if ((selector & 0xfffc) == 0) {
2224 /* NULL selector case: invalid TR */
2225 env->tr.base = 0;
2226 env->tr.limit = 0;
2227 env->tr.flags = 0;
2228 } else {
2229 if (selector & 0x4)
2230 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2231 dt = &env->gdt;
2232 index = selector & ~7;
2233 #ifdef TARGET_X86_64
2234 if (env->hflags & HF_LMA_MASK)
2235 entry_limit = 15;
2236 else
2237 #endif
2238 entry_limit = 7;
2239 if ((index + entry_limit) > dt->limit)
2240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2241 ptr = dt->base + index;
2242 e1 = ldl_kernel(ptr);
2243 e2 = ldl_kernel(ptr + 4);
2244 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2245 if ((e2 & DESC_S_MASK) ||
2246 (type != 1 && type != 9))
2247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2248 if (!(e2 & DESC_P_MASK))
2249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2250 #ifdef TARGET_X86_64
2251 if (env->hflags & HF_LMA_MASK) {
2252 uint32_t e3, e4;
2253 e3 = ldl_kernel(ptr + 8);
2254 e4 = ldl_kernel(ptr + 12);
2255 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2256 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2257 load_seg_cache_raw_dt(&env->tr, e1, e2);
2258 env->tr.base |= (target_ulong)e3 << 32;
2259 } else
2260 #endif
2262 load_seg_cache_raw_dt(&env->tr, e1, e2);
2264 e2 |= DESC_TSS_BUSY_MASK;
2265 stl_kernel(ptr + 4, e2);
2267 env->tr.selector = selector;
2270 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2271 void helper_load_seg(int seg_reg, int selector)
2273 uint32_t e1, e2;
2274 int cpl, dpl, rpl;
2275 SegmentCache *dt;
2276 int index;
2277 target_ulong ptr;
2279 selector &= 0xffff;
2280 cpl = env->hflags & HF_CPL_MASK;
2281 if ((selector & 0xfffc) == 0) {
2282 /* null selector case */
2283 if (seg_reg == R_SS
2284 #ifdef TARGET_X86_64
2285 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2286 #endif
2288 raise_exception_err(EXCP0D_GPF, 0);
2289 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2290 } else {
2292 if (selector & 0x4)
2293 dt = &env->ldt;
2294 else
2295 dt = &env->gdt;
2296 index = selector & ~7;
2297 if ((index + 7) > dt->limit)
2298 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2299 ptr = dt->base + index;
2300 e1 = ldl_kernel(ptr);
2301 e2 = ldl_kernel(ptr + 4);
2303 if (!(e2 & DESC_S_MASK))
2304 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2305 rpl = selector & 3;
2306 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2307 if (seg_reg == R_SS) {
2308 /* must be writable segment */
2309 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2310 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2311 if (rpl != cpl || dpl != cpl)
2312 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2313 } else {
2314 /* must be readable segment */
2315 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2316 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2318 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2319 /* if not conforming code, test rights */
2320 if (dpl < cpl || dpl < rpl)
2321 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2325 if (!(e2 & DESC_P_MASK)) {
2326 if (seg_reg == R_SS)
2327 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2328 else
2329 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2332 /* set the access bit if not already set */
2333 if (!(e2 & DESC_A_MASK)) {
2334 e2 |= DESC_A_MASK;
2335 stl_kernel(ptr + 4, e2);
2338 cpu_x86_load_seg_cache(env, seg_reg, selector,
2339 get_seg_base(e1, e2),
2340 get_seg_limit(e1, e2),
2341 e2);
2342 #if 0
2343 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2344 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2345 #endif
2349 /* protected mode jump */
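/* Far JMP to a code segment, a TSS/task gate, or a call gate.  Through
   a call gate only CS:EIP is replaced (JMP never switches stacks), and
   conforming/non-conforming privilege rules are checked before CS is
   reloaded. */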
2350 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2351 int next_eip_addend)
2353 int gate_cs, type;
2354 uint32_t e1, e2, cpl, dpl, rpl, limit;
2355 target_ulong next_eip;
2357 if ((new_cs & 0xfffc) == 0)
2358 raise_exception_err(EXCP0D_GPF, 0);
2359 if (load_segment(&e1, &e2, new_cs) != 0)
2360 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2361 cpl = env->hflags & HF_CPL_MASK;
2362 if (e2 & DESC_S_MASK) {
2363 if (!(e2 & DESC_CS_MASK))
2364 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2365 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2366 if (e2 & DESC_C_MASK) {
2367 /* conforming code segment */
2368 if (dpl > cpl)
2369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2370 } else {
2371 /* non conforming code segment */
2372 rpl = new_cs & 3;
2373 if (rpl > cpl)
2374 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2375 if (dpl != cpl)
2376 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2378 if (!(e2 & DESC_P_MASK))
2379 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2380 limit = get_seg_limit(e1, e2);
2381 if (new_eip > limit &&
2382 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2383 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2384 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2385 get_seg_base(e1, e2), limit, e2);
2386 EIP = new_eip;
2387 } else {
2388 /* jump to call or task gate */
2389 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390 rpl = new_cs & 3;
2391 cpl = env->hflags & HF_CPL_MASK;
2392 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2393 switch(type) {
2394 case 1: /* 286 TSS */
2395 case 9: /* 386 TSS */
2396 case 5: /* task gate */
2397 if (dpl < cpl || dpl < rpl)
2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2399 next_eip = env->eip + next_eip_addend;
2400 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2401 CC_OP = CC_OP_EFLAGS;
2402 break;
2403 case 4: /* 286 call gate */
2404 case 12: /* 386 call gate */
2405 if ((dpl < cpl) || (dpl < rpl))
2406 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2407 if (!(e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2409 gate_cs = e1 >> 16;
2410 new_eip = (e1 & 0xffff);
2411 if (type == 12)
2412 new_eip |= (e2 & 0xffff0000);
2413 if (load_segment(&e1, &e2, gate_cs) != 0)
2414 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2415 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2416 /* must be code segment */
2417 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2418 (DESC_S_MASK | DESC_CS_MASK)))
2419 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2420 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2421 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2422 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2423 if (!(e2 & DESC_P_MASK))
2424 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2425 limit = get_seg_limit(e1, e2);
2426 if (new_eip > limit)
2427 raise_exception_err(EXCP0D_GPF, 0);
2428 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2429 get_seg_base(e1, e2), limit, e2);
2430 EIP = new_eip;
2431 break;
2432 default:
2433 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2434 break;
2439 /* real mode call */
2440 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2441 int shift, int next_eip)
2443 int new_eip;
2444 uint32_t esp, esp_mask;
2445 target_ulong ssp;
2447 new_eip = new_eip1;
2448 esp = ESP;
2449 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2450 ssp = env->segs[R_SS].base;
2451 if (shift) {
2452 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2453 PUSHL(ssp, esp, esp_mask, next_eip);
2454 } else {
2455 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2456 PUSHW(ssp, esp, esp_mask, next_eip);
2459 SET_ESP(esp, esp_mask);
2460 env->eip = new_eip;
2461 env->segs[R_CS].selector = new_cs;
2462 env->segs[R_CS].base = (new_cs << 4);
2465 /* protected mode call */
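/* Far CALL through a code segment, TSS/task gate, or call gate.  A
   call gate into a more privileged non-conforming segment switches to
   the stack taken from the TSS and copies param_count parameters from
   the old stack before pushing the return address. */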
2466 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2467 int shift, int next_eip_addend)
2469 int new_stack, i;
2470 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2471 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2472 uint32_t val, limit, old_sp_mask;
2473 target_ulong ssp, old_ssp, next_eip;
2475 next_eip = env->eip + next_eip_addend;
2476 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2477 LOG_PCALL_STATE(env);
2478 if ((new_cs & 0xfffc) == 0)
2479 raise_exception_err(EXCP0D_GPF, 0);
2480 if (load_segment(&e1, &e2, new_cs) != 0)
2481 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2482 cpl = env->hflags & HF_CPL_MASK;
2483 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2484 if (e2 & DESC_S_MASK) {
2485 if (!(e2 & DESC_CS_MASK))
2486 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2487 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2488 if (e2 & DESC_C_MASK) {
2489 /* conforming code segment */
2490 if (dpl > cpl)
2491 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2492 } else {
2493 /* non conforming code segment */
2494 rpl = new_cs & 3;
2495 if (rpl > cpl)
2496 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2497 if (dpl != cpl)
2498 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2500 if (!(e2 & DESC_P_MASK))
2501 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2503 #ifdef TARGET_X86_64
2504 /* XXX: check 16/32 bit cases in long mode */
2505 if (shift == 2) {
2506 target_ulong rsp;
2507 /* 64 bit case */
2508 rsp = ESP;
2509 PUSHQ(rsp, env->segs[R_CS].selector);
2510 PUSHQ(rsp, next_eip);
2511 /* from this point, not restartable */
2512 ESP = rsp;
2513 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2514 get_seg_base(e1, e2),
2515 get_seg_limit(e1, e2), e2);
2516 EIP = new_eip;
2517 } else
2518 #endif
2520 sp = ESP;
2521 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2522 ssp = env->segs[R_SS].base;
2523 if (shift) {
2524 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2525 PUSHL(ssp, sp, sp_mask, next_eip);
2526 } else {
2527 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2528 PUSHW(ssp, sp, sp_mask, next_eip);
2531 limit = get_seg_limit(e1, e2);
2532 if (new_eip > limit)
2533 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2534 /* from this point, not restartable */
2535 SET_ESP(sp, sp_mask);
2536 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2537 get_seg_base(e1, e2), limit, e2);
2538 EIP = new_eip;
2540 } else {
2541 /* check gate type */
2542 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2543 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2544 rpl = new_cs & 3;
2545 switch(type) {
2546 case 1: /* available 286 TSS */
2547 case 9: /* available 386 TSS */
2548 case 5: /* task gate */
2549 if (dpl < cpl || dpl < rpl)
2550 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2551 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2552 CC_OP = CC_OP_EFLAGS;
2553 return;
2554 case 4: /* 286 call gate */
2555 case 12: /* 386 call gate */
2556 break;
2557 default:
2558 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2559 break;
2561 shift = type >> 3;
2563 if (dpl < cpl || dpl < rpl)
2564 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2565 /* check valid bit */
2566 if (!(e2 & DESC_P_MASK))
2567 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2568 selector = e1 >> 16;
2569 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2570 param_count = e2 & 0x1f;
2571 if ((selector & 0xfffc) == 0)
2572 raise_exception_err(EXCP0D_GPF, 0);
2574 if (load_segment(&e1, &e2, selector) != 0)
2575 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2576 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2577 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2578 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2579 if (dpl > cpl)
2580 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2581 if (!(e2 & DESC_P_MASK))
2582 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2584 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2585 /* to inner privilege */
2586 get_ss_esp_from_tss(&ss, &sp, dpl);
2587 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2588 ss, sp, param_count, ESP);
2589 if ((ss & 0xfffc) == 0)
2590 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2591 if ((ss & 3) != dpl)
2592 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2593 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2594 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2595 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2596 if (ss_dpl != dpl)
2597 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2598 if (!(ss_e2 & DESC_S_MASK) ||
2599 (ss_e2 & DESC_CS_MASK) ||
2600 !(ss_e2 & DESC_W_MASK))
2601 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2602 if (!(ss_e2 & DESC_P_MASK))
2603 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2605 // push_size = ((param_count * 2) + 8) << shift;
2607 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2608 old_ssp = env->segs[R_SS].base;
2610 sp_mask = get_sp_mask(ss_e2);
2611 ssp = get_seg_base(ss_e1, ss_e2);
2612 if (shift) {
2613 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2614 PUSHL(ssp, sp, sp_mask, ESP);
2615 for(i = param_count - 1; i >= 0; i--) {
2616 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2617 PUSHL(ssp, sp, sp_mask, val);
2619 } else {
2620 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2621 PUSHW(ssp, sp, sp_mask, ESP);
2622 for(i = param_count - 1; i >= 0; i--) {
2623 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2624 PUSHW(ssp, sp, sp_mask, val);
2627 new_stack = 1;
2628 } else {
2629 /* to same privilege */
2630 sp = ESP;
2631 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2632 ssp = env->segs[R_SS].base;
2633 // push_size = (4 << shift);
2634 new_stack = 0;
2637 if (shift) {
2638 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2639 PUSHL(ssp, sp, sp_mask, next_eip);
2640 } else {
2641 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2642 PUSHW(ssp, sp, sp_mask, next_eip);
2645 /* from this point, not restartable */
2647 if (new_stack) {
2648 ss = (ss & ~3) | dpl;
2649 cpu_x86_load_seg_cache(env, R_SS, ss,
2650 ssp,
2651 get_seg_limit(ss_e1, ss_e2),
2652 ss_e2);
2655 selector = (selector & ~3) | dpl;
2656 cpu_x86_load_seg_cache(env, R_CS, selector,
2657 get_seg_base(e1, e2),
2658 get_seg_limit(e1, e2),
2659 e2);
2660 cpu_x86_set_cpl(env, dpl);
2661 SET_ESP(sp, sp_mask);
2662 EIP = offset;
2666 /* real and vm86 mode iret */
2667 void helper_iret_real(int shift)
2669 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2670 target_ulong ssp;
2671 int eflags_mask;
2673 sp_mask = 0xffff; /* XXX: use SS segment size? */
2674 sp = ESP;
2675 ssp = env->segs[R_SS].base;
2676 if (shift == 1) {
2677 /* 32 bits */
2678 POPL(ssp, sp, sp_mask, new_eip);
2679 POPL(ssp, sp, sp_mask, new_cs);
2680 new_cs &= 0xffff;
2681 POPL(ssp, sp, sp_mask, new_eflags);
2682 } else {
2683 /* 16 bits */
2684 POPW(ssp, sp, sp_mask, new_eip);
2685 POPW(ssp, sp, sp_mask, new_cs);
2686 POPW(ssp, sp, sp_mask, new_eflags);
2688 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2689 env->segs[R_CS].selector = new_cs;
2690 env->segs[R_CS].base = (new_cs << 4);
2691 env->eip = new_eip;
2692 if (env->eflags & VM_MASK)
2693 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2694 else
2695 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2696 if (shift == 0)
2697 eflags_mask &= 0xffff;
2698 load_eflags(new_eflags, eflags_mask);
2699 env->hflags2 &= ~HF2_NMI_MASK;
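/* Used on privilege-lowering returns: data and non-conforming code
   segment registers whose DPL is below the new CPL are forced to the
   null selector so they cannot be used from the outer ring. */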
2702 static inline void validate_seg(int seg_reg, int cpl)
2704 int dpl;
2705 uint32_t e2;
2707 /* XXX: on x86_64, we do not want to nullify FS and GS because
2708 they may still contain a valid base. I would be interested to
2709 know how a real x86_64 CPU behaves */
2710 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2711 (env->segs[seg_reg].selector & 0xfffc) == 0)
2712 return;
2714 e2 = env->segs[seg_reg].flags;
2715 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2716 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2717 /* data or non conforming code segment */
2718 if (dpl < cpl) {
2719 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2724 /* protected mode iret */
2725 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2727 uint32_t new_cs, new_eflags, new_ss;
2728 uint32_t new_es, new_ds, new_fs, new_gs;
2729 uint32_t e1, e2, ss_e1, ss_e2;
2730 int cpl, dpl, rpl, eflags_mask, iopl;
2731 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2733 #ifdef TARGET_X86_64
2734 if (shift == 2)
2735 sp_mask = -1;
2736 else
2737 #endif
2738 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2739 sp = ESP;
2740 ssp = env->segs[R_SS].base;
2741 new_eflags = 0; /* avoid warning */
2742 #ifdef TARGET_X86_64
2743 if (shift == 2) {
2744 POPQ(sp, new_eip);
2745 POPQ(sp, new_cs);
2746 new_cs &= 0xffff;
2747 if (is_iret) {
2748 POPQ(sp, new_eflags);
2750 } else
2751 #endif
2752 if (shift == 1) {
2753 /* 32 bits */
2754 POPL(ssp, sp, sp_mask, new_eip);
2755 POPL(ssp, sp, sp_mask, new_cs);
2756 new_cs &= 0xffff;
2757 if (is_iret) {
2758 POPL(ssp, sp, sp_mask, new_eflags);
2759 if (new_eflags & VM_MASK)
2760 goto return_to_vm86;
2762 } else {
2763 /* 16 bits */
2764 POPW(ssp, sp, sp_mask, new_eip);
2765 POPW(ssp, sp, sp_mask, new_cs);
2766 if (is_iret)
2767 POPW(ssp, sp, sp_mask, new_eflags);
2769 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2770 new_cs, new_eip, shift, addend);
2771 LOG_PCALL_STATE(env);
2772 if ((new_cs & 0xfffc) == 0)
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 if (load_segment(&e1, &e2, new_cs) != 0)
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 if (!(e2 & DESC_S_MASK) ||
2777 !(e2 & DESC_CS_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 cpl = env->hflags & HF_CPL_MASK;
2780 rpl = new_cs & 3;
2781 if (rpl < cpl)
2782 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2783 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2784 if (e2 & DESC_C_MASK) {
2785 if (dpl > rpl)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 } else {
2788 if (dpl != rpl)
2789 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 if (!(e2 & DESC_P_MASK))
2792 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794 sp += addend;
2795 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2796 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2797 /* return to same privilege level */
2798 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2799 get_seg_base(e1, e2),
2800 get_seg_limit(e1, e2),
2801 e2);
2802 } else {
2803 /* return to different privilege level */
2804 #ifdef TARGET_X86_64
2805 if (shift == 2) {
2806 POPQ(sp, new_esp);
2807 POPQ(sp, new_ss);
2808 new_ss &= 0xffff;
2809 } else
2810 #endif
2811 if (shift == 1) {
2812 /* 32 bits */
2813 POPL(ssp, sp, sp_mask, new_esp);
2814 POPL(ssp, sp, sp_mask, new_ss);
2815 new_ss &= 0xffff;
2816 } else {
2817 /* 16 bits */
2818 POPW(ssp, sp, sp_mask, new_esp);
2819 POPW(ssp, sp, sp_mask, new_ss);
2821 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2822 new_ss, new_esp);
2823 if ((new_ss & 0xfffc) == 0) {
2824 #ifdef TARGET_X86_64
2825 /* NULL ss is allowed in long mode if cpl != 3 */
2826 /* XXX: test CS64 ? */
2827 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2828 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2829 0, 0xffffffff,
2830 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2831 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2832 DESC_W_MASK | DESC_A_MASK);
2833 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2834 } else
2835 #endif
2837 raise_exception_err(EXCP0D_GPF, 0);
2839 } else {
2840 if ((new_ss & 3) != rpl)
2841 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2842 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2843 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2844 if (!(ss_e2 & DESC_S_MASK) ||
2845 (ss_e2 & DESC_CS_MASK) ||
2846 !(ss_e2 & DESC_W_MASK))
2847 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2848 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2849 if (dpl != rpl)
2850 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2851 if (!(ss_e2 & DESC_P_MASK))
2852 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2853 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2854 get_seg_base(ss_e1, ss_e2),
2855 get_seg_limit(ss_e1, ss_e2),
2856 ss_e2);
2859 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2860 get_seg_base(e1, e2),
2861 get_seg_limit(e1, e2),
2862 e2);
2863 cpu_x86_set_cpl(env, rpl);
2864 sp = new_esp;
2865 #ifdef TARGET_X86_64
2866 if (env->hflags & HF_CS64_MASK)
2867 sp_mask = -1;
2868 else
2869 #endif
2870 sp_mask = get_sp_mask(ss_e2);
2872 /* validate data segments */
2873 validate_seg(R_ES, rpl);
2874 validate_seg(R_DS, rpl);
2875 validate_seg(R_FS, rpl);
2876 validate_seg(R_GS, rpl);
2878 sp += addend;
2880 SET_ESP(sp, sp_mask);
2881 env->eip = new_eip;
2882 if (is_iret) {
2883 /* NOTE: 'cpl' is the _old_ CPL */
2884 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2885 if (cpl == 0)
2886 eflags_mask |= IOPL_MASK;
2887 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2888 if (cpl <= iopl)
2889 eflags_mask |= IF_MASK;
2890 if (shift == 0)
2891 eflags_mask &= 0xffff;
2892 load_eflags(new_eflags, eflags_mask);
2894 return;
2896 return_to_vm86:
2897 POPL(ssp, sp, sp_mask, new_esp);
2898 POPL(ssp, sp, sp_mask, new_ss);
2899 POPL(ssp, sp, sp_mask, new_es);
2900 POPL(ssp, sp, sp_mask, new_ds);
2901 POPL(ssp, sp, sp_mask, new_fs);
2902 POPL(ssp, sp, sp_mask, new_gs);
2904 /* modify processor state */
2905 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2906 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2907 load_seg_vm(R_CS, new_cs & 0xffff);
2908 cpu_x86_set_cpl(env, 3);
2909 load_seg_vm(R_SS, new_ss & 0xffff);
2910 load_seg_vm(R_ES, new_es & 0xffff);
2911 load_seg_vm(R_DS, new_ds & 0xffff);
2912 load_seg_vm(R_FS, new_fs & 0xffff);
2913 load_seg_vm(R_GS, new_gs & 0xffff);
2915 env->eip = new_eip & 0xffff;
2916 ESP = new_esp;
2919 void helper_iret_protected(int shift, int next_eip)
2921 int tss_selector, type;
2922 uint32_t e1, e2;
2924 /* specific case for TSS */
2925 if (env->eflags & NT_MASK) {
2926 #ifdef TARGET_X86_64
2927 if (env->hflags & HF_LMA_MASK)
2928 raise_exception_err(EXCP0D_GPF, 0);
2929 #endif
2930 tss_selector = lduw_kernel(env->tr.base + 0);
2931 if (tss_selector & 4)
2932 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2933 if (load_segment(&e1, &e2, tss_selector) != 0)
2934 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2935 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2936 /* NOTE: we check both segment and busy TSS */
2937 if (type != 3)
2938 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2939 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2940 } else {
2941 helper_ret_protected(shift, 1, 0);
2943 env->hflags2 &= ~HF2_NMI_MASK;
2946 void helper_lret_protected(int shift, int addend)
2948 helper_ret_protected(shift, 0, addend);
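/* SYSENTER: enter CPL 0 with flat CS/SS derived from the
   IA32_SYSENTER_CS MSR (SS = CS + 8), ESP/EIP taken from the
   corresponding SYSENTER MSRs, and VM/IF/RF cleared. */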
2951 void helper_sysenter(void)
2953 if (env->sysenter_cs == 0) {
2954 raise_exception_err(EXCP0D_GPF, 0);
2956 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2957 cpu_x86_set_cpl(env, 0);
2959 #ifdef TARGET_X86_64
2960 if (env->hflags & HF_LMA_MASK) {
2961 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2962 0, 0xffffffff,
2963 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2964 DESC_S_MASK |
2965 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2966 } else
2967 #endif
2969 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2970 0, 0xffffffff,
2971 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2972 DESC_S_MASK |
2973 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2975 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2976 0, 0xffffffff,
2977 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2978 DESC_S_MASK |
2979 DESC_W_MASK | DESC_A_MASK);
2980 ESP = env->sysenter_esp;
2981 EIP = env->sysenter_eip;
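/* SYSEXIT: return to CPL 3.  CS/SS come from IA32_SYSENTER_CS + 16/24
   (or + 32/40 for a 64-bit return), EIP is taken from EDX and ESP from
   ECX; #GP if SYSENTER_CS is zero or the caller is not CPL 0. */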
2984 void helper_sysexit(int dflag)
2986 int cpl;
2988 cpl = env->hflags & HF_CPL_MASK;
2989 if (env->sysenter_cs == 0 || cpl != 0) {
2990 raise_exception_err(EXCP0D_GPF, 0);
2992 cpu_x86_set_cpl(env, 3);
2993 #ifdef TARGET_X86_64
2994 if (dflag == 2) {
2995 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2996 0, 0xffffffff,
2997 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2998 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2999 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3000 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3001 0, 0xffffffff,
3002 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3003 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3004 DESC_W_MASK | DESC_A_MASK);
3005 } else
3006 #endif
3008 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3009 0, 0xffffffff,
3010 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3011 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3012 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3013 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3014 0, 0xffffffff,
3015 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3016 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3017 DESC_W_MASK | DESC_A_MASK);
3019 ESP = ECX;
3020 EIP = EDX;
3023 #if defined(CONFIG_USER_ONLY)
3024 target_ulong helper_read_crN(int reg)
3026 return 0;
3029 void helper_write_crN(int reg, target_ulong t0)
3033 void helper_movl_drN_T0(int reg, target_ulong t0)
3036 #else
3037 target_ulong helper_read_crN(int reg)
3039 target_ulong val;
3041 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3042 switch(reg) {
3043 default:
3044 val = env->cr[reg];
3045 break;
3046 case 8:
3047 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3048 val = cpu_get_apic_tpr(env->apic_state);
3049 } else {
3050 val = env->v_tpr;
3052 break;
3054 return val;
3057 void helper_write_crN(int reg, target_ulong t0)
3059 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3060 switch(reg) {
3061 case 0:
3062 cpu_x86_update_cr0(env, t0);
3063 break;
3064 case 3:
3065 cpu_x86_update_cr3(env, t0);
3066 break;
3067 case 4:
3068 cpu_x86_update_cr4(env, t0);
3069 break;
3070 case 8:
3071 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3072 cpu_set_apic_tpr(env->apic_state, t0);
3074 env->v_tpr = t0 & 0x0f;
3075 break;
3076 default:
3077 env->cr[reg] = t0;
3078 break;
3082 void helper_movl_drN_T0(int reg, target_ulong t0)
3084 int i;
3086 if (reg < 4) {
3087 hw_breakpoint_remove(env, reg);
3088 env->dr[reg] = t0;
3089 hw_breakpoint_insert(env, reg);
3090 } else if (reg == 7) {
3091 for (i = 0; i < 4; i++)
3092 hw_breakpoint_remove(env, i);
3093 env->dr[7] = t0;
3094 for (i = 0; i < 4; i++)
3095 hw_breakpoint_insert(env, i);
3096 } else
3097 env->dr[reg] = t0;
3099 #endif
3101 void helper_lmsw(target_ulong t0)
3103 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3104 if already set to one. */
3105 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3106 helper_write_crN(0, t0);
3109 void helper_clts(void)
3111 env->cr[0] &= ~CR0_TS_MASK;
3112 env->hflags &= ~HF_TS_MASK;
3115 void helper_invlpg(target_ulong addr)
3117 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3118 tlb_flush_page(env, addr);
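/* RDTSC: #GP when CR4.TSD is set outside ring 0; otherwise the time
   stamp counter (plus the SVM tsc_offset) is returned in EDX:EAX. */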
3121 void helper_rdtsc(void)
3123 uint64_t val;
3125 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3126 raise_exception(EXCP0D_GPF);
3128 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3130 val = cpu_get_tsc(env) + env->tsc_offset;
3131 EAX = (uint32_t)(val);
3132 EDX = (uint32_t)(val >> 32);
3135 void helper_rdtscp(void)
3137 helper_rdtsc();
3138 ECX = (uint32_t)(env->tsc_aux);
3141 void helper_rdpmc(void)
3143 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3144 raise_exception(EXCP0D_GPF);
3146 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3148 /* currently unimplemented */
3149 raise_exception_err(EXCP06_ILLOP, 0);
3152 #if defined(CONFIG_USER_ONLY)
3153 void helper_wrmsr(void)
3157 void helper_rdmsr(void)
3160 #else
3161 void helper_wrmsr(void)
3163 uint64_t val;
3165 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3167 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3169 switch((uint32_t)ECX) {
3170 case MSR_IA32_SYSENTER_CS:
3171 env->sysenter_cs = val & 0xffff;
3172 break;
3173 case MSR_IA32_SYSENTER_ESP:
3174 env->sysenter_esp = val;
3175 break;
3176 case MSR_IA32_SYSENTER_EIP:
3177 env->sysenter_eip = val;
3178 break;
3179 case MSR_IA32_APICBASE:
3180 cpu_set_apic_base(env->apic_state, val);
3181 break;
3182 case MSR_EFER:
3184 uint64_t update_mask;
3185 update_mask = 0;
3186 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3187 update_mask |= MSR_EFER_SCE;
3188 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3189 update_mask |= MSR_EFER_LME;
3190 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3191 update_mask |= MSR_EFER_FFXSR;
3192 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3193 update_mask |= MSR_EFER_NXE;
3194 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3195 update_mask |= MSR_EFER_SVME;
3196 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3197 update_mask |= MSR_EFER_FFXSR;
3198 cpu_load_efer(env, (env->efer & ~update_mask) |
3199 (val & update_mask));
3201 break;
3202 case MSR_STAR:
3203 env->star = val;
3204 break;
3205 case MSR_PAT:
3206 env->pat = val;
3207 break;
3208 case MSR_VM_HSAVE_PA:
3209 env->vm_hsave = val;
3210 break;
3211 #ifdef TARGET_X86_64
3212 case MSR_LSTAR:
3213 env->lstar = val;
3214 break;
3215 case MSR_CSTAR:
3216 env->cstar = val;
3217 break;
3218 case MSR_FMASK:
3219 env->fmask = val;
3220 break;
3221 case MSR_FSBASE:
3222 env->segs[R_FS].base = val;
3223 break;
3224 case MSR_GSBASE:
3225 env->segs[R_GS].base = val;
3226 break;
3227 case MSR_KERNELGSBASE:
3228 env->kernelgsbase = val;
3229 break;
3230 #endif
3231 case MSR_MTRRphysBase(0):
3232 case MSR_MTRRphysBase(1):
3233 case MSR_MTRRphysBase(2):
3234 case MSR_MTRRphysBase(3):
3235 case MSR_MTRRphysBase(4):
3236 case MSR_MTRRphysBase(5):
3237 case MSR_MTRRphysBase(6):
3238 case MSR_MTRRphysBase(7):
3239 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3240 break;
3241 case MSR_MTRRphysMask(0):
3242 case MSR_MTRRphysMask(1):
3243 case MSR_MTRRphysMask(2):
3244 case MSR_MTRRphysMask(3):
3245 case MSR_MTRRphysMask(4):
3246 case MSR_MTRRphysMask(5):
3247 case MSR_MTRRphysMask(6):
3248 case MSR_MTRRphysMask(7):
3249 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3250 break;
3251 case MSR_MTRRfix64K_00000:
3252 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3253 break;
3254 case MSR_MTRRfix16K_80000:
3255 case MSR_MTRRfix16K_A0000:
3256 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3257 break;
3258 case MSR_MTRRfix4K_C0000:
3259 case MSR_MTRRfix4K_C8000:
3260 case MSR_MTRRfix4K_D0000:
3261 case MSR_MTRRfix4K_D8000:
3262 case MSR_MTRRfix4K_E0000:
3263 case MSR_MTRRfix4K_E8000:
3264 case MSR_MTRRfix4K_F0000:
3265 case MSR_MTRRfix4K_F8000:
3266 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3267 break;
3268 case MSR_MTRRdefType:
3269 env->mtrr_deftype = val;
3270 break;
3271 case MSR_MCG_STATUS:
3272 env->mcg_status = val;
3273 break;
3274 case MSR_MCG_CTL:
3275 if ((env->mcg_cap & MCG_CTL_P)
3276 && (val == 0 || val == ~(uint64_t)0))
3277 env->mcg_ctl = val;
3278 break;
3279 case MSR_TSC_AUX:
3280 env->tsc_aux = val;
3281 break;
3282 case MSR_IA32_MISC_ENABLE:
3283 env->msr_ia32_misc_enable = val;
3284 break;
3285 default:
3286 if ((uint32_t)ECX >= MSR_MC0_CTL
3287 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) { /* bank count is MCG_CAP[7:0] */
3288 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3289 if ((offset & 0x3) != 0
3290 || (val == 0 || val == ~(uint64_t)0))
3291 env->mce_banks[offset] = val;
3292 break;
3294 /* XXX: exception ? */
3295 break;
3299 void helper_rdmsr(void)
3301 uint64_t val;
3303 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3305 switch((uint32_t)ECX) {
3306 case MSR_IA32_SYSENTER_CS:
3307 val = env->sysenter_cs;
3308 break;
3309 case MSR_IA32_SYSENTER_ESP:
3310 val = env->sysenter_esp;
3311 break;
3312 case MSR_IA32_SYSENTER_EIP:
3313 val = env->sysenter_eip;
3314 break;
3315 case MSR_IA32_APICBASE:
3316 val = cpu_get_apic_base(env->apic_state);
3317 break;
3318 case MSR_EFER:
3319 val = env->efer;
3320 break;
3321 case MSR_STAR:
3322 val = env->star;
3323 break;
3324 case MSR_PAT:
3325 val = env->pat;
3326 break;
3327 case MSR_VM_HSAVE_PA:
3328 val = env->vm_hsave;
3329 break;
3330 case MSR_IA32_PERF_STATUS:
3331 /* tsc_increment_by_tick */
3332 val = 1000ULL;
3333 /* CPU multiplier */
3334 val |= (((uint64_t)4ULL) << 40);
3335 break;
3336 #ifdef TARGET_X86_64
3337 case MSR_LSTAR:
3338 val = env->lstar;
3339 break;
3340 case MSR_CSTAR:
3341 val = env->cstar;
3342 break;
3343 case MSR_FMASK:
3344 val = env->fmask;
3345 break;
3346 case MSR_FSBASE:
3347 val = env->segs[R_FS].base;
3348 break;
3349 case MSR_GSBASE:
3350 val = env->segs[R_GS].base;
3351 break;
3352 case MSR_KERNELGSBASE:
3353 val = env->kernelgsbase;
3354 break;
3355 case MSR_TSC_AUX:
3356 val = env->tsc_aux;
3357 break;
3358 #endif
3359 case MSR_MTRRphysBase(0):
3360 case MSR_MTRRphysBase(1):
3361 case MSR_MTRRphysBase(2):
3362 case MSR_MTRRphysBase(3):
3363 case MSR_MTRRphysBase(4):
3364 case MSR_MTRRphysBase(5):
3365 case MSR_MTRRphysBase(6):
3366 case MSR_MTRRphysBase(7):
3367 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3368 break;
3369 case MSR_MTRRphysMask(0):
3370 case MSR_MTRRphysMask(1):
3371 case MSR_MTRRphysMask(2):
3372 case MSR_MTRRphysMask(3):
3373 case MSR_MTRRphysMask(4):
3374 case MSR_MTRRphysMask(5):
3375 case MSR_MTRRphysMask(6):
3376 case MSR_MTRRphysMask(7):
3377 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3378 break;
3379 case MSR_MTRRfix64K_00000:
3380 val = env->mtrr_fixed[0];
3381 break;
3382 case MSR_MTRRfix16K_80000:
3383 case MSR_MTRRfix16K_A0000:
3384 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3385 break;
3386 case MSR_MTRRfix4K_C0000:
3387 case MSR_MTRRfix4K_C8000:
3388 case MSR_MTRRfix4K_D0000:
3389 case MSR_MTRRfix4K_D8000:
3390 case MSR_MTRRfix4K_E0000:
3391 case MSR_MTRRfix4K_E8000:
3392 case MSR_MTRRfix4K_F0000:
3393 case MSR_MTRRfix4K_F8000:
3394 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3395 break;
3396 case MSR_MTRRdefType:
3397 val = env->mtrr_deftype;
3398 break;
3399 case MSR_MTRRcap:
3400 if (env->cpuid_features & CPUID_MTRR)
3401 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3402 else
3403 /* XXX: exception ? */
3404 val = 0;
3405 break;
3406 case MSR_MCG_CAP:
3407 val = env->mcg_cap;
3408 break;
3409 case MSR_MCG_CTL:
3410 if (env->mcg_cap & MCG_CTL_P)
3411 val = env->mcg_ctl;
3412 else
3413 val = 0;
3414 break;
3415 case MSR_MCG_STATUS:
3416 val = env->mcg_status;
3417 break;
3418 case MSR_IA32_MISC_ENABLE:
3419 val = env->msr_ia32_misc_enable;
3420 break;
3421 default:
3422 if ((uint32_t)ECX >= MSR_MC0_CTL
3423 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3424 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3425 val = env->mce_banks[offset];
3426 break;
3428 /* XXX: exception ? */
3429 val = 0;
3430 break;
3432 EAX = (uint32_t)(val);
3433 EDX = (uint32_t)(val >> 32);
3435 #endif
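/* LSL: return the segment limit of the selector and set ZF when the
   descriptor passes the type and privilege checks; otherwise ZF is
   cleared and 0 is returned. */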
3437 target_ulong helper_lsl(target_ulong selector1)
3439 unsigned int limit;
3440 uint32_t e1, e2, eflags, selector;
3441 int rpl, dpl, cpl, type;
3443 selector = selector1 & 0xffff;
3444 eflags = helper_cc_compute_all(CC_OP);
3445 if ((selector & 0xfffc) == 0)
3446 goto fail;
3447 if (load_segment(&e1, &e2, selector) != 0)
3448 goto fail;
3449 rpl = selector & 3;
3450 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3451 cpl = env->hflags & HF_CPL_MASK;
3452 if (e2 & DESC_S_MASK) {
3453 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3454 /* conforming */
3455 } else {
3456 if (dpl < cpl || dpl < rpl)
3457 goto fail;
3459 } else {
3460 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3461 switch(type) {
3462 case 1:
3463 case 2:
3464 case 3:
3465 case 9:
3466 case 11:
3467 break;
3468 default:
3469 goto fail;
3471 if (dpl < cpl || dpl < rpl) {
3472 fail:
3473 CC_SRC = eflags & ~CC_Z;
3474 return 0;
3477 limit = get_seg_limit(e1, e2);
3478 CC_SRC = eflags | CC_Z;
3479 return limit;
3482 target_ulong helper_lar(target_ulong selector1)
3484 uint32_t e1, e2, eflags, selector;
3485 int rpl, dpl, cpl, type;
3487 selector = selector1 & 0xffff;
3488 eflags = helper_cc_compute_all(CC_OP);
3489 if ((selector & 0xfffc) == 0)
3490 goto fail;
3491 if (load_segment(&e1, &e2, selector) != 0)
3492 goto fail;
3493 rpl = selector & 3;
3494 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3495 cpl = env->hflags & HF_CPL_MASK;
3496 if (e2 & DESC_S_MASK) {
3497 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3498 /* conforming */
3499 } else {
3500 if (dpl < cpl || dpl < rpl)
3501 goto fail;
3503 } else {
3504 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3505 switch(type) {
3506 case 1:
3507 case 2:
3508 case 3:
3509 case 4:
3510 case 5:
3511 case 9:
3512 case 11:
3513 case 12:
3514 break;
3515 default:
3516 goto fail;
3518 if (dpl < cpl || dpl < rpl) {
3519 fail:
3520 CC_SRC = eflags & ~CC_Z;
3521 return 0;
3524 CC_SRC = eflags | CC_Z;
3525 return e2 & 0x00f0ff00;
3528 void helper_verr(target_ulong selector1)
3530 uint32_t e1, e2, eflags, selector;
3531 int rpl, dpl, cpl;
3533 selector = selector1 & 0xffff;
3534 eflags = helper_cc_compute_all(CC_OP);
3535 if ((selector & 0xfffc) == 0)
3536 goto fail;
3537 if (load_segment(&e1, &e2, selector) != 0)
3538 goto fail;
3539 if (!(e2 & DESC_S_MASK))
3540 goto fail;
3541 rpl = selector & 3;
3542 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3543 cpl = env->hflags & HF_CPL_MASK;
3544 if (e2 & DESC_CS_MASK) {
3545 if (!(e2 & DESC_R_MASK))
3546 goto fail;
3547 if (!(e2 & DESC_C_MASK)) {
3548 if (dpl < cpl || dpl < rpl)
3549 goto fail;
3551 } else {
3552 if (dpl < cpl || dpl < rpl) {
3553 fail:
3554 CC_SRC = eflags & ~CC_Z;
3555 return;
3558 CC_SRC = eflags | CC_Z;
3561 void helper_verw(target_ulong selector1)
3563 uint32_t e1, e2, eflags, selector;
3564 int rpl, dpl, cpl;
3566 selector = selector1 & 0xffff;
3567 eflags = helper_cc_compute_all(CC_OP);
3568 if ((selector & 0xfffc) == 0)
3569 goto fail;
3570 if (load_segment(&e1, &e2, selector) != 0)
3571 goto fail;
3572 if (!(e2 & DESC_S_MASK))
3573 goto fail;
3574 rpl = selector & 3;
3575 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3576 cpl = env->hflags & HF_CPL_MASK;
3577 if (e2 & DESC_CS_MASK) {
3578 goto fail;
3579 } else {
3580 if (dpl < cpl || dpl < rpl)
3581 goto fail;
3582 if (!(e2 & DESC_W_MASK)) {
3583 fail:
3584 CC_SRC = eflags & ~CC_Z;
3585 return;
3588 CC_SRC = eflags | CC_Z;
3591 /* x87 FPU helpers */
3593 static inline double floatx80_to_double(floatx80 a)
3595 union {
3596 float64 f64;
3597 double d;
3598 } u;
3600 u.f64 = floatx80_to_float64(a, &env->fp_status);
3601 return u.d;
3604 static inline floatx80 double_to_floatx80(double a)
3606 union {
3607 float64 f64;
3608 double d;
3609 } u;
3611 u.d = a;
3612 return float64_to_floatx80(u.f64, &env->fp_status);
3615 static void fpu_set_exception(int mask)
3617 env->fpus |= mask;
3618 if (env->fpus & (~env->fpuc & FPUC_EM))
3619 env->fpus |= FPUS_SE | FPUS_B;
3622 static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3624 if (floatx80_is_zero(b)) {
3625 fpu_set_exception(FPUS_ZE);
3627 return floatx80_div(a, b, &env->fp_status);
3630 static void fpu_raise_exception(void)
3632 if (env->cr[0] & CR0_NE_MASK) {
3633 raise_exception(EXCP10_COPR);
3635 #if !defined(CONFIG_USER_ONLY)
3636 else {
3637 cpu_set_ferr(env);
3639 #endif
3642 void helper_flds_FT0(uint32_t val)
3644 union {
3645 float32 f;
3646 uint32_t i;
3647 } u;
3648 u.i = val;
3649 FT0 = float32_to_floatx80(u.f, &env->fp_status);
3652 void helper_fldl_FT0(uint64_t val)
3654 union {
3655 float64 f;
3656 uint64_t i;
3657 } u;
3658 u.i = val;
3659 FT0 = float64_to_floatx80(u.f, &env->fp_status);
3662 void helper_fildl_FT0(int32_t val)
3664 FT0 = int32_to_floatx80(val, &env->fp_status);
3667 void helper_flds_ST0(uint32_t val)
3669 int new_fpstt;
3670 union {
3671 float32 f;
3672 uint32_t i;
3673 } u;
3674 new_fpstt = (env->fpstt - 1) & 7;
3675 u.i = val;
3676 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3677 env->fpstt = new_fpstt;
3678 env->fptags[new_fpstt] = 0; /* validate stack entry */
3681 void helper_fldl_ST0(uint64_t val)
3683 int new_fpstt;
3684 union {
3685 float64 f;
3686 uint64_t i;
3687 } u;
3688 new_fpstt = (env->fpstt - 1) & 7;
3689 u.i = val;
3690 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3691 env->fpstt = new_fpstt;
3692 env->fptags[new_fpstt] = 0; /* validate stack entry */
3695 void helper_fildl_ST0(int32_t val)
3697 int new_fpstt;
3698 new_fpstt = (env->fpstt - 1) & 7;
3699 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3700 env->fpstt = new_fpstt;
3701 env->fptags[new_fpstt] = 0; /* validate stack entry */
3704 void helper_fildll_ST0(int64_t val)
3706 int new_fpstt;
3707 new_fpstt = (env->fpstt - 1) & 7;
3708 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3709 env->fpstt = new_fpstt;
3710 env->fptags[new_fpstt] = 0; /* validate stack entry */
3713 uint32_t helper_fsts_ST0(void)
3715 union {
3716 float32 f;
3717 uint32_t i;
3718 } u;
3719 u.f = floatx80_to_float32(ST0, &env->fp_status);
3720 return u.i;
3723 uint64_t helper_fstl_ST0(void)
3725 union {
3726 float64 f;
3727 uint64_t i;
3728 } u;
3729 u.f = floatx80_to_float64(ST0, &env->fp_status);
3730 return u.i;
3733 int32_t helper_fist_ST0(void)
3735 int32_t val;
3736 val = floatx80_to_int32(ST0, &env->fp_status);
3737 if (val != (int16_t)val)
3738 val = -32768;
3739 return val;
3742 int32_t helper_fistl_ST0(void)
3744 int32_t val;
3745 val = floatx80_to_int32(ST0, &env->fp_status);
3746 return val;
3749 int64_t helper_fistll_ST0(void)
3751 int64_t val;
3752 val = floatx80_to_int64(ST0, &env->fp_status);
3753 return val;
3756 int32_t helper_fistt_ST0(void)
3758 int32_t val;
3759 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3760 if (val != (int16_t)val)
3761 val = -32768;
3762 return val;
3765 int32_t helper_fisttl_ST0(void)
3767 int32_t val;
3768 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3769 return val;
3772 int64_t helper_fisttll_ST0(void)
3774 int64_t val;
3775 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3776 return val;
3779 void helper_fldt_ST0(target_ulong ptr)
3781 int new_fpstt;
3782 new_fpstt = (env->fpstt - 1) & 7;
3783 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3784 env->fpstt = new_fpstt;
3785 env->fptags[new_fpstt] = 0; /* validate stack entry */
3788 void helper_fstt_ST0(target_ulong ptr)
3790 helper_fstt(ST0, ptr);
3793 void helper_fpush(void)
3795 fpush();
3798 void helper_fpop(void)
3800 fpop();
3803 void helper_fdecstp(void)
3805 env->fpstt = (env->fpstt - 1) & 7;
3806 env->fpus &= (~0x4700);
3809 void helper_fincstp(void)
3811 env->fpstt = (env->fpstt + 1) & 7;
3812 env->fpus &= (~0x4700);
3815 /* FPU move */
3817 void helper_ffree_STN(int st_index)
3819 env->fptags[(env->fpstt + st_index) & 7] = 1;
3822 void helper_fmov_ST0_FT0(void)
3824 ST0 = FT0;
3827 void helper_fmov_FT0_STN(int st_index)
3829 FT0 = ST(st_index);
3832 void helper_fmov_ST0_STN(int st_index)
3834 ST0 = ST(st_index);
3837 void helper_fmov_STN_ST0(int st_index)
3839 ST(st_index) = ST0;
3842 void helper_fxchg_ST0_STN(int st_index)
3844 floatx80 tmp;
3845 tmp = ST(st_index);
3846 ST(st_index) = ST0;
3847 ST0 = tmp;
3850 /* FPU operations */
3852 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3854 void helper_fcom_ST0_FT0(void)
3856 int ret;
3858 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3859 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3862 void helper_fucom_ST0_FT0(void)
3864 int ret;
3866 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3867 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3870 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3872 void helper_fcomi_ST0_FT0(void)
3874 int eflags;
3875 int ret;
3877 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3878 eflags = helper_cc_compute_all(CC_OP);
3879 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3880 CC_SRC = eflags;
3883 void helper_fucomi_ST0_FT0(void)
3885 int eflags;
3886 int ret;
3888 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3889 eflags = helper_cc_compute_all(CC_OP);
3890 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3891 CC_SRC = eflags;
3894 void helper_fadd_ST0_FT0(void)
3896 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
3899 void helper_fmul_ST0_FT0(void)
3901 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
3904 void helper_fsub_ST0_FT0(void)
3906 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
3909 void helper_fsubr_ST0_FT0(void)
3911 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
3914 void helper_fdiv_ST0_FT0(void)
3916 ST0 = helper_fdiv(ST0, FT0);
3919 void helper_fdivr_ST0_FT0(void)
3921 ST0 = helper_fdiv(FT0, ST0);
3924 /* fp operations between STN and ST0 */
3926 void helper_fadd_STN_ST0(int st_index)
3928 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
3931 void helper_fmul_STN_ST0(int st_index)
3933 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
3936 void helper_fsub_STN_ST0(int st_index)
3938 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
3941 void helper_fsubr_STN_ST0(int st_index)
3943 ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
3946 void helper_fdiv_STN_ST0(int st_index)
3948 floatx80 *p;
3949 p = &ST(st_index);
3950 *p = helper_fdiv(*p, ST0);
3953 void helper_fdivr_STN_ST0(int st_index)
3955 floatx80 *p;
3956 p = &ST(st_index);
3957 *p = helper_fdiv(ST0, *p);
3960 /* misc FPU operations */
3961 void helper_fchs_ST0(void)
3963 ST0 = floatx80_chs(ST0);
3966 void helper_fabs_ST0(void)
3968 ST0 = floatx80_abs(ST0);
3971 void helper_fld1_ST0(void)
3973 ST0 = floatx80_one;
3976 void helper_fldl2t_ST0(void)
3978 ST0 = floatx80_l2t;
3981 void helper_fldl2e_ST0(void)
3983 ST0 = floatx80_l2e;
3986 void helper_fldpi_ST0(void)
3988 ST0 = floatx80_pi;
3991 void helper_fldlg2_ST0(void)
3993 ST0 = floatx80_lg2;
3996 void helper_fldln2_ST0(void)
3998 ST0 = floatx80_ln2;
4001 void helper_fldz_ST0(void)
4003 ST0 = floatx80_zero;
4006 void helper_fldz_FT0(void)
4008 FT0 = floatx80_zero;
4011 uint32_t helper_fnstsw(void)
4013 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4016 uint32_t helper_fnstcw(void)
4018 return env->fpuc;
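/* Propagate the x87 control word into the softfloat status: bits 10-11
   select the rounding mode and bits 8-9 the rounding precision
   (32/64/80 bits). */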
4021 static void update_fp_status(void)
4023 int rnd_type;
4025 /* set rounding mode */
4026 switch(env->fpuc & FPU_RC_MASK) {
4027 default:
4028 case FPU_RC_NEAR:
4029 rnd_type = float_round_nearest_even;
4030 break;
4031 case FPU_RC_DOWN:
4032 rnd_type = float_round_down;
4033 break;
4034 case FPU_RC_UP:
4035 rnd_type = float_round_up;
4036 break;
4037 case FPU_RC_CHOP:
4038 rnd_type = float_round_to_zero;
4039 break;
4041 set_float_rounding_mode(rnd_type, &env->fp_status);
4042 switch((env->fpuc >> 8) & 3) {
4043 case 0:
4044 rnd_type = 32;
4045 break;
4046 case 2:
4047 rnd_type = 64;
4048 break;
4049 case 3:
4050 default:
4051 rnd_type = 80;
4052 break;
4054 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4057 void helper_fldcw(uint32_t val)
4059 env->fpuc = val;
4060 update_fp_status();
4063 void helper_fclex(void)
4065 env->fpus &= 0x7f00;
4068 void helper_fwait(void)
4070 if (env->fpus & FPUS_SE)
4071 fpu_raise_exception();
4074 void helper_fninit(void)
4076 env->fpus = 0;
4077 env->fpstt = 0;
4078 env->fpuc = 0x37f;
4079 env->fptags[0] = 1;
4080 env->fptags[1] = 1;
4081 env->fptags[2] = 1;
4082 env->fptags[3] = 1;
4083 env->fptags[4] = 1;
4084 env->fptags[5] = 1;
4085 env->fptags[6] = 1;
4086 env->fptags[7] = 1;
4089 /* BCD ops */
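/* FBLD: load an 18-digit packed BCD operand.  The nine low bytes hold
   two decimal digits each and byte 9 carries the sign in bit 7; the
   value is converted to floatx80 and pushed on the FPU stack. */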
4091 void helper_fbld_ST0(target_ulong ptr)
4093 floatx80 tmp;
4094 uint64_t val;
4095 unsigned int v;
4096 int i;
4098 val = 0;
4099 for(i = 8; i >= 0; i--) {
4100 v = ldub(ptr + i);
4101 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4103 tmp = int64_to_floatx80(val, &env->fp_status);
4104 if (ldub(ptr + 9) & 0x80) {
4105 tmp = floatx80_chs(tmp); /* the sign change must be assigned back */
4107 fpush();
4108 ST0 = tmp;
4111 void helper_fbst_ST0(target_ulong ptr)
4113 int v;
4114 target_ulong mem_ref, mem_end;
4115 int64_t val;
4117 val = floatx80_to_int64(ST0, &env->fp_status);
4118 mem_ref = ptr;
4119 mem_end = mem_ref + 9;
4120 if (val < 0) {
4121 stb(mem_end, 0x80);
4122 val = -val;
4123 } else {
4124 stb(mem_end, 0x00);
4126 while (mem_ref < mem_end) {
4127 if (val == 0)
4128 break;
4129 v = val % 100;
4130 val = val / 100;
4131 v = ((v / 10) << 4) | (v % 10);
4132 stb(mem_ref++, v);
4134 while (mem_ref < mem_end) {
4135 stb(mem_ref++, 0);
4139 void helper_f2xm1(void)
4141 double val = floatx80_to_double(ST0);
4142 val = pow(2.0, val) - 1.0;
4143 ST0 = double_to_floatx80(val);
4146 void helper_fyl2x(void)
4148 double fptemp = floatx80_to_double(ST0);
4150 if (fptemp > 0.0) {
4151 fptemp = log(fptemp) / log(2.0); /* log2(ST) */
4152 fptemp *= floatx80_to_double(ST1);
4153 ST1 = double_to_floatx80(fptemp);
4154 fpop();
4155 } else {
4156 env->fpus &= (~0x4700);
4157 env->fpus |= 0x400;
4161 void helper_fptan(void)
4163 double fptemp = floatx80_to_double(ST0);
4165 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4166 env->fpus |= 0x400;
4167 } else {
4168 fptemp = tan(fptemp);
4169 ST0 = double_to_floatx80(fptemp);
4170 fpush();
4171 ST0 = floatx80_one;
4172 env->fpus &= (~0x400); /* C2 <-- 0 */
4173 /* the above code is for |arg| < 2**52 only */
4177 void helper_fpatan(void)
4179 double fptemp, fpsrcop;
4181 fpsrcop = floatx80_to_double(ST1);
4182 fptemp = floatx80_to_double(ST0);
4183 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
4184 fpop();
4187 void helper_fxtract(void)
4189 CPU_LDoubleU temp;
4191 temp.d = ST0;
4193 if (floatx80_is_zero(ST0)) {
4194 /* Easy way to generate -inf and raising division by 0 exception */
4195 ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
4196 fpush();
4197 ST0 = temp.d;
4198 } else {
4199 int expdif;
4201 expdif = EXPD(temp) - EXPBIAS;
4202 /*DP exponent bias*/
4203 ST0 = int32_to_floatx80(expdif, &env->fp_status);
4204 fpush();
4205 BIASEXPONENT(temp);
4206 ST0 = temp.d;
4210 void helper_fprem1(void)
4212 double st0, st1, dblq, fpsrcop, fptemp;
4213 CPU_LDoubleU fpsrcop1, fptemp1;
4214 int expdif;
4215 signed long long int q;
4217 st0 = floatx80_to_double(ST0);
4218 st1 = floatx80_to_double(ST1);
4220 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4221 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4222 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4223 return;
4226 fpsrcop = st0;
4227 fptemp = st1;
4228 fpsrcop1.d = ST0;
4229 fptemp1.d = ST1;
4230 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4232 if (expdif < 0) {
4233 /* optimisation? taken from the AMD docs */
4234 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4235 /* ST0 is unchanged */
4236 return;
4239 if (expdif < 53) {
4240 dblq = fpsrcop / fptemp;
4241 /* round dblq towards nearest integer */
4242 dblq = rint(dblq);
4243 st0 = fpsrcop - fptemp * dblq;
4245 /* convert dblq to q by truncating towards zero */
4246 if (dblq < 0.0)
4247 q = (signed long long int)(-dblq);
4248 else
4249 q = (signed long long int)dblq;
4251 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4252 /* (C0,C3,C1) <-- (q2,q1,q0) */
4253 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4254 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4255 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4256 } else {
4257 env->fpus |= 0x400; /* C2 <-- 1 */
4258 fptemp = pow(2.0, expdif - 50);
4259 fpsrcop = (st0 / st1) / fptemp;
4260 /* fpsrcop = integer obtained by chopping */
4261 fpsrcop = (fpsrcop < 0.0) ?
4262 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4263 st0 -= (st1 * fpsrcop * fptemp);
4265 ST0 = double_to_floatx80(st0);
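/* FPREM: partial remainder with the quotient truncated towards zero
   (FPREM1 above rounds it to nearest).  When the exponent difference
   is too large for this double-precision emulation (>= 53) only a
   partial reduction is done and C2 is set so the caller can loop;
   otherwise C0/C3/C1 receive the low three quotient bits. */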
4268 void helper_fprem(void)
4270 double st0, st1, dblq, fpsrcop, fptemp;
4271 CPU_LDoubleU fpsrcop1, fptemp1;
4272 int expdif;
4273 signed long long int q;
4275 st0 = floatx80_to_double(ST0);
4276 st1 = floatx80_to_double(ST1);
4278 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4279 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4280 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4281 return;
4284 fpsrcop = st0;
4285 fptemp = st1;
4286 fpsrcop1.d = ST0;
4287 fptemp1.d = ST1;
4288 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4290 if (expdif < 0) {
4291 /* optimisation? taken from the AMD docs */
4292 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4293 /* ST0 is unchanged */
4294 return;
4297 if (expdif < 53) {
4298 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4299 /* round dblq towards zero */
4300 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4301 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
4303 /* convert dblq to q by truncating towards zero */
4304 if (dblq < 0.0)
4305 q = (signed long long int)(-dblq);
4306 else
4307 q = (signed long long int)dblq;
4309 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4310 /* (C0,C3,C1) <-- (q2,q1,q0) */
4311 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4312 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4313 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4314 } else {
4315 int N = 32 + (expdif % 32); /* as per AMD docs */
4316 env->fpus |= 0x400; /* C2 <-- 1 */
4317 fptemp = pow(2.0, (double)(expdif - N));
4318 fpsrcop = (st0 / st1) / fptemp;
4319 /* fpsrcop = integer obtained by chopping */
4320 fpsrcop = (fpsrcop < 0.0) ?
4321 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4322 st0 -= (st1 * fpsrcop * fptemp);
4324 ST0 = double_to_floatx80(st0);
4327 void helper_fyl2xp1(void)
4329 double fptemp = floatx80_to_double(ST0);
4331 if ((fptemp+1.0)>0.0) {
4332 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4333 fptemp *= floatx80_to_double(ST1);
4334 ST1 = double_to_floatx80(fptemp);
4335 fpop();
4336 } else {
4337 env->fpus &= (~0x4700);
4338 env->fpus |= 0x400;
4342 void helper_fsqrt(void)
4344 if (floatx80_is_neg(ST0)) {
4345 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4346 env->fpus |= 0x400;
4348 ST0 = floatx80_sqrt(ST0, &env->fp_status);
4351 void helper_fsincos(void)
4353 double fptemp = floatx80_to_double(ST0);
4355 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4356 env->fpus |= 0x400;
4357 } else {
4358 ST0 = double_to_floatx80(sin(fptemp));
4359 fpush();
4360 ST0 = double_to_floatx80(cos(fptemp));
4361 env->fpus &= (~0x400); /* C2 <-- 0 */
4362 /* the above code is for |arg| < 2**63 only */
4366 void helper_frndint(void)
4368 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
4371 void helper_fscale(void)
4373 if (floatx80_is_any_nan(ST1)) {
4374 ST0 = ST1;
4375 } else {
4376 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4377 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
4381 void helper_fsin(void)
4383 double fptemp = floatx80_to_double(ST0);
4385 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4386 env->fpus |= 0x400;
4387 } else {
4388 ST0 = double_to_floatx80(sin(fptemp));
4389 env->fpus &= (~0x400); /* C2 <-- 0 */
4390 /* the above code is for |arg| < 2**53 only */
4394 void helper_fcos(void)
4396 double fptemp = floatx80_to_double(ST0);
4398 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4399 env->fpus |= 0x400;
4400 } else {
4401 ST0 = double_to_floatx80(cos(fptemp));
4402 env->fpus &= (~0x400); /* C2 <-- 0 */
4403 /* the above code is for |arg| < 2**63 only */
4407 void helper_fxam_ST0(void)
4409 CPU_LDoubleU temp;
4410 int expdif;
4412 temp.d = ST0;
4414 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4415 if (SIGND(temp))
4416 env->fpus |= 0x200; /* C1 <-- 1 */
4418 /* XXX: test fptags too */
4419 expdif = EXPD(temp);
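/* FXAM reports the class of ST0 in C3/C2/C0 (C1 above holds the sign):
   0x100 (C0) = NaN, 0x500 (C2|C0) = infinity, 0x4000 (C3) = zero,
   0x4400 (C3|C2) = denormal, 0x400 (C2) = normal finite number. */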
4420 if (expdif == MAXEXPD) {
4421 if (MANTD(temp) == 0x8000000000000000ULL)
4422 env->fpus |= 0x500 /*Infinity*/;
4423 else
4424 env->fpus |= 0x100 /*NaN*/;
4425 } else if (expdif == 0) {
4426 if (MANTD(temp) == 0)
4427 env->fpus |= 0x4000 /*Zero*/;
4428 else
4429 env->fpus |= 0x4400 /*Denormal*/;
4430 } else {
4431 env->fpus |= 0x400;
4435 void helper_fstenv(target_ulong ptr, int data32)
4437 int fpus, fptag, exp, i;
4438 uint64_t mant;
4439 CPU_LDoubleU tmp;
4441 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4442 fptag = 0;
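/* Rebuild the full 16-bit tag word: two bits per register,
   00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal),
   11 = empty. */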
4443 for (i=7; i>=0; i--) {
4444 fptag <<= 2;
4445 if (env->fptags[i]) {
4446 fptag |= 3;
4447 } else {
4448 tmp.d = env->fpregs[i].d;
4449 exp = EXPD(tmp);
4450 mant = MANTD(tmp);
4451 if (exp == 0 && mant == 0) {
4452 /* zero */
4453 fptag |= 1;
4454 } else if (exp == 0 || exp == MAXEXPD
4455 || (mant & (1LL << 63)) == 0
4457 /* NaNs, infinity, denormal */
4458 fptag |= 2;
4462 if (data32) {
4463 /* 32 bit */
4464 stl(ptr, env->fpuc);
4465 stl(ptr + 4, fpus);
4466 stl(ptr + 8, fptag);
4467 stl(ptr + 12, 0); /* fpip */
4468 stl(ptr + 16, 0); /* fpcs */
4469 stl(ptr + 20, 0); /* fpoo */
4470 stl(ptr + 24, 0); /* fpos */
4471 } else {
4472 /* 16 bit */
4473 stw(ptr, env->fpuc);
4474 stw(ptr + 2, fpus);
4475 stw(ptr + 4, fptag);
4476 stw(ptr + 6, 0);
4477 stw(ptr + 8, 0);
4478 stw(ptr + 10, 0);
4479 stw(ptr + 12, 0);
4483 void helper_fldenv(target_ulong ptr, int data32)
4485 int i, fpus, fptag;
4487 if (data32) {
4488 env->fpuc = lduw(ptr);
4489 fpus = lduw(ptr + 4);
4490 fptag = lduw(ptr + 8);
4492 else {
4493 env->fpuc = lduw(ptr);
4494 fpus = lduw(ptr + 2);
4495 fptag = lduw(ptr + 4);
4497 env->fpstt = (fpus >> 11) & 7;
4498 env->fpus = fpus & ~0x3800;
4499 for(i = 0;i < 8; i++) {
4500 env->fptags[i] = ((fptag & 3) == 3);
4501 fptag >>= 2;
4505 void helper_fsave(target_ulong ptr, int data32)
4507 floatx80 tmp;
4508 int i;
4510 helper_fstenv(ptr, data32);
4512 ptr += (14 << data32);
4513 for(i = 0;i < 8; i++) {
4514 tmp = ST(i);
4515 helper_fstt(tmp, ptr);
4516 ptr += 10;
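/* Like the hardware FSAVE/FNSAVE, reinitialise the FPU once its state
   has been stored. */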
4519 /* fninit */
4520 env->fpus = 0;
4521 env->fpstt = 0;
4522 env->fpuc = 0x37f;
4523 env->fptags[0] = 1;
4524 env->fptags[1] = 1;
4525 env->fptags[2] = 1;
4526 env->fptags[3] = 1;
4527 env->fptags[4] = 1;
4528 env->fptags[5] = 1;
4529 env->fptags[6] = 1;
4530 env->fptags[7] = 1;
4533 void helper_frstor(target_ulong ptr, int data32)
4535 floatx80 tmp;
4536 int i;
4538 helper_fldenv(ptr, data32);
4539 ptr += (14 << data32);
4541 for(i = 0;i < 8; i++) {
4542 tmp = helper_fldt(ptr);
4543 ST(i) = tmp;
4544 ptr += 10;
4549 #if defined(CONFIG_USER_ONLY)
4550 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
4552 CPUX86State *saved_env;
4554 saved_env = env;
4555 env = s;
4556 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
4557 selector &= 0xffff;
4558 cpu_x86_load_seg_cache(env, seg_reg, selector,
4559 (selector << 4), 0xffff, 0);
4560 } else {
4561 helper_load_seg(seg_reg, selector);
4563 env = saved_env;
4566 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
4568 CPUX86State *saved_env;
4570 saved_env = env;
4571 env = s;
4573 helper_fsave(ptr, data32);
4575 env = saved_env;
4578 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
4580 CPUX86State *saved_env;
4582 saved_env = env;
4583 env = s;
4585 helper_frstor(ptr, data32);
4587 env = saved_env;
4589 #endif
4591 void helper_fxsave(target_ulong ptr, int data64)
4593 int fpus, fptag, i, nb_xmm_regs;
4594 floatx80 tmp;
4595 target_ulong addr;
4597 /* The operand must be 16 byte aligned */
4598 if (ptr & 0xf) {
4599 raise_exception(EXCP0D_GPF);
4602 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4603 fptag = 0;
4604 for(i = 0; i < 8; i++) {
4605 fptag |= (env->fptags[i] << i);
4607 stw(ptr, env->fpuc);
4608 stw(ptr + 2, fpus);
4609 stw(ptr + 4, fptag ^ 0xff);
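/* FXSAVE stores an abridged tag word: a single bit per register where
   1 means valid, hence the inversion of the env->fptags "empty" flags
   collected above. */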
4610 #ifdef TARGET_X86_64
4611 if (data64) {
4612 stq(ptr + 0x08, 0); /* rip */
4613 stq(ptr + 0x10, 0); /* rdp */
4614 } else
4615 #endif
4617 stl(ptr + 0x08, 0); /* eip */
4618 stl(ptr + 0x0c, 0); /* sel */
4619 stl(ptr + 0x10, 0); /* dp */
4620 stl(ptr + 0x14, 0); /* sel */
4623 addr = ptr + 0x20;
4624 for(i = 0;i < 8; i++) {
4625 tmp = ST(i);
4626 helper_fstt(tmp, addr);
4627 addr += 16;
4630 if (env->cr[4] & CR4_OSFXSR_MASK) {
4631 /* XXX: finish it */
4632 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4633 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4634 if (env->hflags & HF_CS64_MASK)
4635 nb_xmm_regs = 16;
4636 else
4637 nb_xmm_regs = 8;
4638 addr = ptr + 0xa0;
4639 /* Fast FXSAVE leaves out the XMM registers */
4640 if (!(env->efer & MSR_EFER_FFXSR)
4641 || (env->hflags & HF_CPL_MASK)
4642 || !(env->hflags & HF_LMA_MASK)) {
4643 for(i = 0; i < nb_xmm_regs; i++) {
4644 stq(addr, env->xmm_regs[i].XMM_Q(0));
4645 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4646 addr += 16;
4652 void helper_fxrstor(target_ulong ptr, int data64)
4654 int i, fpus, fptag, nb_xmm_regs;
4655 floatx80 tmp;
4656 target_ulong addr;
4658 /* The operand must be 16 byte aligned */
4659 if (ptr & 0xf) {
4660 raise_exception(EXCP0D_GPF);
4663 env->fpuc = lduw(ptr);
4664 fpus = lduw(ptr + 2);
4665 fptag = lduw(ptr + 4);
4666 env->fpstt = (fpus >> 11) & 7;
4667 env->fpus = fpus & ~0x3800;
4668 fptag ^= 0xff;
4669 for(i = 0;i < 8; i++) {
4670 env->fptags[i] = ((fptag >> i) & 1);
4673 addr = ptr + 0x20;
4674 for(i = 0;i < 8; i++) {
4675 tmp = helper_fldt(addr);
4676 ST(i) = tmp;
4677 addr += 16;
4680 if (env->cr[4] & CR4_OSFXSR_MASK) {
4681 /* XXX: finish it */
4682 env->mxcsr = ldl(ptr + 0x18);
4683 //ldl(ptr + 0x1c);
4684 if (env->hflags & HF_CS64_MASK)
4685 nb_xmm_regs = 16;
4686 else
4687 nb_xmm_regs = 8;
4688 addr = ptr + 0xa0;
4689 /* Fast FXRSTOR leaves out the XMM registers */
4690 if (!(env->efer & MSR_EFER_FFXSR)
4691 || (env->hflags & HF_CPL_MASK)
4692 || !(env->hflags & HF_LMA_MASK)) {
4693 for(i = 0; i < nb_xmm_regs; i++) {
4694 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4695 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4696 addr += 16;
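/* Convert between the 80-bit extended-precision register format and a
   separate 64-bit mantissa plus 16-bit sign/exponent word. */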
4702 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
4704 CPU_LDoubleU temp;
4706 temp.d = f;
4707 *pmant = temp.l.lower;
4708 *pexp = temp.l.upper;
4711 floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
4713 CPU_LDoubleU temp;
4715 temp.l.upper = upper;
4716 temp.l.lower = mant;
4717 return temp.d;
4720 #ifdef TARGET_X86_64
4722 //#define DEBUG_MULDIV
4724 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4726 *plow += a;
4727 /* carry test */
4728 if (*plow < a)
4729 (*phigh)++;
4730 *phigh += b;
4733 static void neg128(uint64_t *plow, uint64_t *phigh)
4735 *plow = ~ *plow;
4736 *phigh = ~ *phigh;
4737 add128(plow, phigh, 1, 0);
4740 /* return TRUE if overflow */
4741 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4743 uint64_t q, r, a1, a0;
4744 int i, qb, ab;
4746 a0 = *plow;
4747 a1 = *phigh;
4748 if (a1 == 0) {
4749 q = a0 / b;
4750 r = a0 % b;
4751 *plow = q;
4752 *phigh = r;
4753 } else {
4754 if (a1 >= b)
4755 return 1;
4756 /* XXX: use a better algorithm */
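/* Plain restoring division: shift the 128-bit dividend left one bit at
   a time, subtracting b from the running remainder (a1) whenever it
   fits; after 64 iterations a0 holds the quotient and a1 the
   remainder. */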
4757 for(i = 0; i < 64; i++) {
4758 ab = a1 >> 63;
4759 a1 = (a1 << 1) | (a0 >> 63);
4760 if (ab || a1 >= b) {
4761 a1 -= b;
4762 qb = 1;
4763 } else {
4764 qb = 0;
4766 a0 = (a0 << 1) | qb;
4768 #if defined(DEBUG_MULDIV)
4769 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4770 *phigh, *plow, b, a0, a1);
4771 #endif
4772 *plow = a0;
4773 *phigh = a1;
4775 return 0;
4778 /* return TRUE if overflow */
4779 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4781 int sa, sb;
4782 sa = ((int64_t)*phigh < 0);
4783 if (sa)
4784 neg128(plow, phigh);
4785 sb = (b < 0);
4786 if (sb)
4787 b = -b;
4788 if (div64(plow, phigh, b) != 0)
4789 return 1;
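/* Signed overflow check: a negative quotient may reach -2^63, while a
   positive one must stay below 2^63. */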
4790 if (sa ^ sb) {
4791 if (*plow > (1ULL << 63))
4792 return 1;
4793 *plow = - *plow;
4794 } else {
4795 if (*plow >= (1ULL << 63))
4796 return 1;
4798 if (sa)
4799 *phigh = - *phigh;
4800 return 0;
4803 void helper_mulq_EAX_T0(target_ulong t0)
4805 uint64_t r0, r1;
4807 mulu64(&r0, &r1, EAX, t0);
4808 EAX = r0;
4809 EDX = r1;
4810 CC_DST = r0;
4811 CC_SRC = r1;
4814 void helper_imulq_EAX_T0(target_ulong t0)
4816 uint64_t r0, r1;
4818 muls64(&r0, &r1, EAX, t0);
4819 EAX = r0;
4820 EDX = r1;
4821 CC_DST = r0;
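/* CC_SRC is non-zero exactly when the high half is not the sign
   extension of the low half, i.e. the signed 128-bit product does not
   fit in 64 bits; the lazy flag computation uses this for CF/OF. */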
4822 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4825 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4827 uint64_t r0, r1;
4829 muls64(&r0, &r1, t0, t1);
4830 CC_DST = r0;
4831 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4832 return r0;
4835 void helper_divq_EAX(target_ulong t0)
4837 uint64_t r0, r1;
4838 if (t0 == 0) {
4839 raise_exception(EXCP00_DIVZ);
4841 r0 = EAX;
4842 r1 = EDX;
4843 if (div64(&r0, &r1, t0))
4844 raise_exception(EXCP00_DIVZ);
4845 EAX = r0;
4846 EDX = r1;
4849 void helper_idivq_EAX(target_ulong t0)
4851 uint64_t r0, r1;
4852 if (t0 == 0) {
4853 raise_exception(EXCP00_DIVZ);
4855 r0 = EAX;
4856 r1 = EDX;
4857 if (idiv64(&r0, &r1, t0))
4858 raise_exception(EXCP00_DIVZ);
4859 EAX = r0;
4860 EDX = r1;
4862 #endif
4864 static void do_hlt(void)
4866 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4867 env->halted = 1;
4868 env->exception_index = EXCP_HLT;
4869 cpu_loop_exit(env);
4872 void helper_hlt(int next_eip_addend)
4874 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4875 EIP += next_eip_addend;
4877 do_hlt();
4880 void helper_monitor(target_ulong ptr)
4882 if ((uint32_t)ECX != 0)
4883 raise_exception(EXCP0D_GPF);
4884 /* XXX: store address ? */
4885 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4888 void helper_mwait(int next_eip_addend)
4890 if ((uint32_t)ECX != 0)
4891 raise_exception(EXCP0D_GPF);
4892 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4893 EIP += next_eip_addend;
4895 /* XXX: not complete but not completely erroneous */
4896 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4897 /* more than one CPU: do not sleep because another CPU may
4898 wake this one */
4899 } else {
4900 do_hlt();
4904 void helper_debug(void)
4906 env->exception_index = EXCP_DEBUG;
4907 cpu_loop_exit(env);
4910 void helper_reset_rf(void)
4912 env->eflags &= ~RF_MASK;
4915 void helper_raise_interrupt(int intno, int next_eip_addend)
4917 raise_interrupt(intno, 1, 0, next_eip_addend);
4920 void helper_raise_exception(int exception_index)
4922 raise_exception(exception_index);
4925 void helper_cli(void)
4927 env->eflags &= ~IF_MASK;
4930 void helper_sti(void)
4932 env->eflags |= IF_MASK;
4935 #if 0
4936 /* vm86plus instructions */
4937 void helper_cli_vm(void)
4939 env->eflags &= ~VIF_MASK;
4942 void helper_sti_vm(void)
4944 env->eflags |= VIF_MASK;
4945 if (env->eflags & VIP_MASK) {
4946 raise_exception(EXCP0D_GPF);
4949 #endif
4951 void helper_set_inhibit_irq(void)
4953 env->hflags |= HF_INHIBIT_IRQ_MASK;
4956 void helper_reset_inhibit_irq(void)
4958 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4961 void helper_boundw(target_ulong a0, int v)
4963 int low, high;
4964 low = ldsw(a0);
4965 high = ldsw(a0 + 2);
4966 v = (int16_t)v;
4967 if (v < low || v > high) {
4968 raise_exception(EXCP05_BOUND);
4972 void helper_boundl(target_ulong a0, int v)
4974 int low, high;
4975 low = ldl(a0);
4976 high = ldl(a0 + 4);
4977 if (v < low || v > high) {
4978 raise_exception(EXCP05_BOUND);
4982 #if !defined(CONFIG_USER_ONLY)
4984 #define MMUSUFFIX _mmu
4986 #define SHIFT 0
4987 #include "softmmu_template.h"
4989 #define SHIFT 1
4990 #include "softmmu_template.h"
4992 #define SHIFT 2
4993 #include "softmmu_template.h"
4995 #define SHIFT 3
4996 #include "softmmu_template.h"
4998 #endif
5000 #if !defined(CONFIG_USER_ONLY)
5001 /* try to fill the TLB and return an exception if error. If retaddr is
5002 NULL, it means that the function was called in C code (i.e. not
5003 from generated code or from helper.c) */
5004 /* XXX: fix it to restore all registers */
5005 void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
5006 void *retaddr)
5008 TranslationBlock *tb;
5009 int ret;
5010 unsigned long pc;
5011 CPUX86State *saved_env;
5013 saved_env = env;
5014 env = env1;
5016 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
5017 if (ret) {
5018 if (retaddr) {
5019 /* now we have a real cpu fault */
5020 pc = (unsigned long)retaddr;
5021 tb = tb_find_pc(pc);
5022 if (tb) {
5023 /* the PC is inside the translated code. It means that we have
5024 a virtual CPU fault */
5025 cpu_restore_state(tb, env, pc);
5028 raise_exception_err(env->exception_index, env->error_code);
5030 env = saved_env;
5032 #endif
5034 /* Secure Virtual Machine helpers */
5036 #if defined(CONFIG_USER_ONLY)
5038 void helper_vmrun(int aflag, int next_eip_addend)
5041 void helper_vmmcall(void)
5044 void helper_vmload(int aflag)
5047 void helper_vmsave(int aflag)
5050 void helper_stgi(void)
5053 void helper_clgi(void)
5056 void helper_skinit(void)
5059 void helper_invlpga(int aflag)
5062 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5065 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5069 void svm_check_intercept(CPUX86State *env1, uint32_t type)
5073 void helper_svm_check_io(uint32_t port, uint32_t param,
5074 uint32_t next_eip_addend)
5077 #else
5079 static inline void svm_save_seg(target_phys_addr_t addr,
5080 const SegmentCache *sc)
5082 stw_phys(addr + offsetof(struct vmcb_seg, selector),
5083 sc->selector);
5084 stq_phys(addr + offsetof(struct vmcb_seg, base),
5085 sc->base);
5086 stl_phys(addr + offsetof(struct vmcb_seg, limit),
5087 sc->limit);
5088 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
5089 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
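/* The shifts above pack the descriptor attribute bits kept at
   flags[15:8] and flags[23:20] into the VMCB's 12-bit attrib field;
   svm_load_seg() below performs the inverse expansion. */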
5092 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
5094 unsigned int flags;
5096 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
5097 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
5098 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
5099 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
5100 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
5103 static inline void svm_load_seg_cache(target_phys_addr_t addr,
5104 CPUX86State *env, int seg_reg)
5106 SegmentCache sc1, *sc = &sc1;
5107 svm_load_seg(addr, sc);
5108 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
5109 sc->base, sc->limit, sc->flags);
5112 void helper_vmrun(int aflag, int next_eip_addend)
5114 target_ulong addr;
5115 uint32_t event_inj;
5116 uint32_t int_ctl;
5118 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
5120 if (aflag == 2)
5121 addr = EAX;
5122 else
5123 addr = (uint32_t)EAX;
5125 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
5127 env->vm_vmcb = addr;
5129 /* save the current CPU state in the hsave page */
5130 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5131 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5133 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5134 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5136 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
5137 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
5138 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
5139 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
5140 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
5141 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
5143 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
5144 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
5146 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
5147 &env->segs[R_ES]);
5148 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
5149 &env->segs[R_CS]);
5150 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
5151 &env->segs[R_SS]);
5152 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
5153 &env->segs[R_DS]);
5155 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
5156 EIP + next_eip_addend);
5157 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
5158 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
5160 /* load the interception bitmaps so we do not need to access the
5161 vmcb in svm mode */
5162 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
5163 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5164 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5165 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5166 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5167 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5169 /* enable intercepts */
5170 env->hflags |= HF_SVMI_MASK;
5172 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5174 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5175 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5177 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5178 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5180 /* clear exit_info_2 so we behave like the real hardware */
5181 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5183 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5184 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5185 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5186 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5187 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5188 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5189 if (int_ctl & V_INTR_MASKING_MASK) {
5190 env->v_tpr = int_ctl & V_TPR_MASK;
5191 env->hflags2 |= HF2_VINTR_MASK;
5192 if (env->eflags & IF_MASK)
5193 env->hflags2 |= HF2_HIF_MASK;
5196 cpu_load_efer(env,
5197 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5198 env->eflags = 0;
5199 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5200 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5201 CC_OP = CC_OP_EFLAGS;
5203 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5204 env, R_ES);
5205 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5206 env, R_CS);
5207 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5208 env, R_SS);
5209 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5210 env, R_DS);
5212 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5213 env->eip = EIP;
5214 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5215 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5216 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5217 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5218 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5220 /* FIXME: guest state consistency checks */
5222 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5223 case TLB_CONTROL_DO_NOTHING:
5224 break;
5225 case TLB_CONTROL_FLUSH_ALL_ASID:
5226 /* FIXME: this is not 100% correct but should work for now */
5227 tlb_flush(env, 1);
5228 break;
5231 env->hflags2 |= HF2_GIF_MASK;
5233 if (int_ctl & V_IRQ_MASK) {
5234 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5237 /* maybe we need to inject an event */
5238 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5239 if (event_inj & SVM_EVTINJ_VALID) {
5240 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5241 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5242 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5244 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5245 /* FIXME: need to implement valid_err */
5246 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5247 case SVM_EVTINJ_TYPE_INTR:
5248 env->exception_index = vector;
5249 env->error_code = event_inj_err;
5250 env->exception_is_int = 0;
5251 env->exception_next_eip = -1;
5252 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5253 /* XXX: is it always correct ? */
5254 do_interrupt_all(vector, 0, 0, 0, 1);
5255 break;
5256 case SVM_EVTINJ_TYPE_NMI:
5257 env->exception_index = EXCP02_NMI;
5258 env->error_code = event_inj_err;
5259 env->exception_is_int = 0;
5260 env->exception_next_eip = EIP;
5261 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5262 cpu_loop_exit(env);
5263 break;
5264 case SVM_EVTINJ_TYPE_EXEPT:
5265 env->exception_index = vector;
5266 env->error_code = event_inj_err;
5267 env->exception_is_int = 0;
5268 env->exception_next_eip = -1;
5269 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5270 cpu_loop_exit(env);
5271 break;
5272 case SVM_EVTINJ_TYPE_SOFT:
5273 env->exception_index = vector;
5274 env->error_code = event_inj_err;
5275 env->exception_is_int = 1;
5276 env->exception_next_eip = EIP;
5277 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5278 cpu_loop_exit(env);
5279 break;
5281 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5285 void helper_vmmcall(void)
5287 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5288 raise_exception(EXCP06_ILLOP);
5291 void helper_vmload(int aflag)
5293 target_ulong addr;
5294 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5296 if (aflag == 2)
5297 addr = EAX;
5298 else
5299 addr = (uint32_t)EAX;
5301 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5302 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5303 env->segs[R_FS].base);
5305 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5306 env, R_FS);
5307 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5308 env, R_GS);
5309 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5310 &env->tr);
5311 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5312 &env->ldt);
5314 #ifdef TARGET_X86_64
5315 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5316 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5317 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5318 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5319 #endif
5320 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5321 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5322 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5323 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5326 void helper_vmsave(int aflag)
5328 target_ulong addr;
5329 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5331 if (aflag == 2)
5332 addr = EAX;
5333 else
5334 addr = (uint32_t)EAX;
5336 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5337 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5338 env->segs[R_FS].base);
5340 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5341 &env->segs[R_FS]);
5342 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5343 &env->segs[R_GS]);
5344 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5345 &env->tr);
5346 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5347 &env->ldt);
5349 #ifdef TARGET_X86_64
5350 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5351 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5352 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5353 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5354 #endif
5355 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5356 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5357 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5358 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5361 void helper_stgi(void)
5363 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5364 env->hflags2 |= HF2_GIF_MASK;
5367 void helper_clgi(void)
5369 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5370 env->hflags2 &= ~HF2_GIF_MASK;
5373 void helper_skinit(void)
5375 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5376 /* XXX: not implemented */
5377 raise_exception(EXCP06_ILLOP);
5380 void helper_invlpga(int aflag)
5382 target_ulong addr;
5383 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5385 if (aflag == 2)
5386 addr = EAX;
5387 else
5388 addr = (uint32_t)EAX;
5390 /* XXX: could use the ASID to see whether the flush is
5391 actually needed */
5392 tlb_flush_page(env, addr);
5395 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5397 if (likely(!(env->hflags & HF_SVMI_MASK)))
5398 return;
5399 switch(type) {
5400 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5401 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5402 helper_vmexit(type, param);
5404 break;
5405 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5406 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5407 helper_vmexit(type, param);
5409 break;
5410 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5411 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5412 helper_vmexit(type, param);
5414 break;
5415 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5416 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5417 helper_vmexit(type, param);
5419 break;
5420 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5421 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5422 helper_vmexit(type, param);
5424 break;
5425 case SVM_EXIT_MSR:
5426 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5427 /* FIXME: this should be read in at vmrun (faster this way?) */
5428 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5429 uint32_t t0, t1;
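/* Each MSR has two adjacent bits in the MSR permission bitmap (read
   intercept followed by write intercept); t1 is the byte offset of the
   pair and t0 its bit offset within that byte, and (1 << param) << t0
   then selects the read or write intercept bit. */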
5430 switch((uint32_t)ECX) {
5431 case 0 ... 0x1fff:
5432 t0 = (ECX * 2) % 8;
5433 t1 = (ECX * 2) / 8;
5434 break;
5435 case 0xc0000000 ... 0xc0001fff:
5436 t0 = (8192 + ECX - 0xc0000000) * 2;
5437 t1 = (t0 / 8);
5438 t0 %= 8;
5439 break;
5440 case 0xc0010000 ... 0xc0011fff:
5441 t0 = (16384 + ECX - 0xc0010000) * 2;
5442 t1 = (t0 / 8);
5443 t0 %= 8;
5444 break;
5445 default:
5446 helper_vmexit(type, param);
5447 t0 = 0;
5448 t1 = 0;
5449 break;
5451 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5452 helper_vmexit(type, param);
5454 break;
5455 default:
5456 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5457 helper_vmexit(type, param);
5459 break;
5463 void svm_check_intercept(CPUX86State *env1, uint32_t type)
5465 CPUX86State *saved_env;
5467 saved_env = env;
5468 env = env1;
5469 helper_svm_check_intercept_param(type, 0);
5470 env = saved_env;
5473 void helper_svm_check_io(uint32_t port, uint32_t param,
5474 uint32_t next_eip_addend)
5476 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5477 /* FIXME: this should be read in at vmrun (faster this way?) */
5478 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
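/* param is the IOIO exit_info_1 value: bits 6:4 hold the one-hot access
   size (1/2/4 bytes), so mask ends up with one permission-bitmap bit per
   byte of the access. */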
5479 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5480 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5481 /* next EIP */
5482 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5483 env->eip + next_eip_addend);
5484 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5489 /* Note: currently only 32 bits of exit_code are used */
5490 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5492 uint32_t int_ctl;
5494 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5495 exit_code, exit_info_1,
5496 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5497 EIP);
5499 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5500 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5501 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5502 } else {
5503 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5506 /* Save the VM state in the vmcb */
5507 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5508 &env->segs[R_ES]);
5509 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5510 &env->segs[R_CS]);
5511 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5512 &env->segs[R_SS]);
5513 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5514 &env->segs[R_DS]);
5516 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5517 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5519 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5520 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5522 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5523 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5524 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5525 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5526 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5528 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5529 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5530 int_ctl |= env->v_tpr & V_TPR_MASK;
5531 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5532 int_ctl |= V_IRQ_MASK;
5533 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5535 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5536 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5537 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5538 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5539 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5540 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5541 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5543 /* Reload the host state from vm_hsave */
5544 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5545 env->hflags &= ~HF_SVMI_MASK;
5546 env->intercept = 0;
5547 env->intercept_exceptions = 0;
5548 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5549 env->tsc_offset = 0;
5551 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5552 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5554 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5555 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5557 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5558 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5559 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5560 /* we need to set the efer after the crs so the hidden flags get
5561 set properly */
5562 cpu_load_efer(env,
5563 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5564 env->eflags = 0;
5565 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5566 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5567 CC_OP = CC_OP_EFLAGS;
5569 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5570 env, R_ES);
5571 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5572 env, R_CS);
5573 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5574 env, R_SS);
5575 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5576 env, R_DS);
5578 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5579 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5580 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5582 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5583 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5585 /* other setups */
5586 cpu_x86_set_cpl(env, 0);
5587 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5588 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5590 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5591 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5592 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5593 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5594 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5596 env->hflags2 &= ~HF2_GIF_MASK;
5597 /* FIXME: Resets the current ASID register to zero (host ASID). */
5599 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5601 /* Clears the TSC_OFFSET inside the processor. */
5603 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5604 from the page table indicated by the host's CR3. If the PDPEs contain
5605 illegal state, the processor causes a shutdown. */
5607 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5608 env->cr[0] |= CR0_PE_MASK;
5609 env->eflags &= ~VM_MASK;
5611 /* Disables all breakpoints in the host DR7 register. */
5613 /* Checks the reloaded host state for consistency. */
5615 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5616 host's code segment or non-canonical (in the case of long mode), a
5617 #GP fault is delivered inside the host. */
5619 /* remove any pending exception */
5620 env->exception_index = -1;
5621 env->error_code = 0;
5622 env->old_exception = -1;
5624 cpu_loop_exit(env);
5627 #endif
5629 /* MMX/SSE */
5630 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
5632 #define SSE_DAZ 0x0040
5633 #define SSE_RC_MASK 0x6000
5634 #define SSE_RC_NEAR 0x0000
5635 #define SSE_RC_DOWN 0x2000
5636 #define SSE_RC_UP 0x4000
5637 #define SSE_RC_CHOP 0x6000
5638 #define SSE_FZ 0x8000
5640 static void update_sse_status(void)
5642 int rnd_type;
5644 /* set rounding mode */
5645 switch(env->mxcsr & SSE_RC_MASK) {
5646 default:
5647 case SSE_RC_NEAR:
5648 rnd_type = float_round_nearest_even;
5649 break;
5650 case SSE_RC_DOWN:
5651 rnd_type = float_round_down;
5652 break;
5653 case SSE_RC_UP:
5654 rnd_type = float_round_up;
5655 break;
5656 case SSE_RC_CHOP:
5657 rnd_type = float_round_to_zero;
5658 break;
5660 set_float_rounding_mode(rnd_type, &env->sse_status);
5662 /* set denormals are zero */
5663 set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
5665 /* set flush to zero */
5666 set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
5669 void helper_ldmxcsr(uint32_t val)
5671 env->mxcsr = val;
5672 update_sse_status();
5675 void helper_enter_mmx(void)
5677 env->fpstt = 0;
5678 *(uint32_t *)(env->fptags) = 0;
5679 *(uint32_t *)(env->fptags + 4) = 0;
5682 void helper_emms(void)
5684 /* set to empty state */
5685 *(uint32_t *)(env->fptags) = 0x01010101;
5686 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5689 /* XXX: suppress */
5690 void helper_movq(void *d, void *s)
5692 *(uint64_t *)d = *(uint64_t *)s;
5695 #define SHIFT 0
5696 #include "ops_sse.h"
5698 #define SHIFT 1
5699 #include "ops_sse.h"
5701 #define SHIFT 0
5702 #include "helper_template.h"
5703 #undef SHIFT
5705 #define SHIFT 1
5706 #include "helper_template.h"
5707 #undef SHIFT
5709 #define SHIFT 2
5710 #include "helper_template.h"
5711 #undef SHIFT
5713 #ifdef TARGET_X86_64
5715 #define SHIFT 3
5716 #include "helper_template.h"
5717 #undef SHIFT
5719 #endif
5721 /* bit operations */
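/* Note: helper_bsf and helper_bsr below require a non-zero operand
   (their scan loops would never terminate on zero), so the zero case
   has to be handled before they are called. */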
5722 target_ulong helper_bsf(target_ulong t0)
5724 int count;
5725 target_ulong res;
5727 res = t0;
5728 count = 0;
5729 while ((res & 1) == 0) {
5730 count++;
5731 res >>= 1;
5733 return count;
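/* Shared LZCNT/BSR helper: with wordsize != 0 it returns the LZCNT
   result for that operand width (wordsize itself when t0 is 0); with
   wordsize == 0 it returns the index of the highest set bit, which is
   what helper_bsr() needs. */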
5736 target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5738 int count;
5739 target_ulong res, mask;
5741 if (wordsize > 0 && t0 == 0) {
5742 return wordsize;
5744 res = t0;
5745 count = TARGET_LONG_BITS - 1;
5746 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5747 while ((res & mask) == 0) {
5748 count--;
5749 res <<= 1;
5751 if (wordsize > 0) {
5752 return wordsize - 1 - count;
5754 return count;
5757 target_ulong helper_bsr(target_ulong t0)
5759 return helper_lzcnt(t0, 0);
5762 static int compute_all_eflags(void)
5764 return CC_SRC;
5767 static int compute_c_eflags(void)
5769 return CC_SRC & CC_C;
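/* Lazy condition code evaluation: CC_OP records which operation last
   set the flags while CC_SRC/CC_DST keep its operands/result, and the
   two dispatchers below reconstruct either all of EFLAGS or just the
   carry flag on demand. */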
5772 uint32_t helper_cc_compute_all(int op)
5774 switch (op) {
5775 default: /* should never happen */ return 0;
5777 case CC_OP_EFLAGS: return compute_all_eflags();
5779 case CC_OP_MULB: return compute_all_mulb();
5780 case CC_OP_MULW: return compute_all_mulw();
5781 case CC_OP_MULL: return compute_all_mull();
5783 case CC_OP_ADDB: return compute_all_addb();
5784 case CC_OP_ADDW: return compute_all_addw();
5785 case CC_OP_ADDL: return compute_all_addl();
5787 case CC_OP_ADCB: return compute_all_adcb();
5788 case CC_OP_ADCW: return compute_all_adcw();
5789 case CC_OP_ADCL: return compute_all_adcl();
5791 case CC_OP_SUBB: return compute_all_subb();
5792 case CC_OP_SUBW: return compute_all_subw();
5793 case CC_OP_SUBL: return compute_all_subl();
5795 case CC_OP_SBBB: return compute_all_sbbb();
5796 case CC_OP_SBBW: return compute_all_sbbw();
5797 case CC_OP_SBBL: return compute_all_sbbl();
5799 case CC_OP_LOGICB: return compute_all_logicb();
5800 case CC_OP_LOGICW: return compute_all_logicw();
5801 case CC_OP_LOGICL: return compute_all_logicl();
5803 case CC_OP_INCB: return compute_all_incb();
5804 case CC_OP_INCW: return compute_all_incw();
5805 case CC_OP_INCL: return compute_all_incl();
5807 case CC_OP_DECB: return compute_all_decb();
5808 case CC_OP_DECW: return compute_all_decw();
5809 case CC_OP_DECL: return compute_all_decl();
5811 case CC_OP_SHLB: return compute_all_shlb();
5812 case CC_OP_SHLW: return compute_all_shlw();
5813 case CC_OP_SHLL: return compute_all_shll();
5815 case CC_OP_SARB: return compute_all_sarb();
5816 case CC_OP_SARW: return compute_all_sarw();
5817 case CC_OP_SARL: return compute_all_sarl();
5819 #ifdef TARGET_X86_64
5820 case CC_OP_MULQ: return compute_all_mulq();
5822 case CC_OP_ADDQ: return compute_all_addq();
5824 case CC_OP_ADCQ: return compute_all_adcq();
5826 case CC_OP_SUBQ: return compute_all_subq();
5828 case CC_OP_SBBQ: return compute_all_sbbq();
5830 case CC_OP_LOGICQ: return compute_all_logicq();
5832 case CC_OP_INCQ: return compute_all_incq();
5834 case CC_OP_DECQ: return compute_all_decq();
5836 case CC_OP_SHLQ: return compute_all_shlq();
5838 case CC_OP_SARQ: return compute_all_sarq();
5839 #endif
5843 uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
5845 CPUX86State *saved_env;
5846 uint32_t ret;
5848 saved_env = env;
5849 env = env1;
5850 ret = helper_cc_compute_all(op);
5851 env = saved_env;
5852 return ret;
5855 uint32_t helper_cc_compute_c(int op)
5857 switch (op) {
5858 default: /* should never happen */ return 0;
5860 case CC_OP_EFLAGS: return compute_c_eflags();
5862 case CC_OP_MULB: return compute_c_mull();
5863 case CC_OP_MULW: return compute_c_mull();
5864 case CC_OP_MULL: return compute_c_mull();
5866 case CC_OP_ADDB: return compute_c_addb();
5867 case CC_OP_ADDW: return compute_c_addw();
5868 case CC_OP_ADDL: return compute_c_addl();
5870 case CC_OP_ADCB: return compute_c_adcb();
5871 case CC_OP_ADCW: return compute_c_adcw();
5872 case CC_OP_ADCL: return compute_c_adcl();
5874 case CC_OP_SUBB: return compute_c_subb();
5875 case CC_OP_SUBW: return compute_c_subw();
5876 case CC_OP_SUBL: return compute_c_subl();
5878 case CC_OP_SBBB: return compute_c_sbbb();
5879 case CC_OP_SBBW: return compute_c_sbbw();
5880 case CC_OP_SBBL: return compute_c_sbbl();
5882 case CC_OP_LOGICB: return compute_c_logicb();
5883 case CC_OP_LOGICW: return compute_c_logicw();
5884 case CC_OP_LOGICL: return compute_c_logicl();
5886 case CC_OP_INCB: return compute_c_incl();
5887 case CC_OP_INCW: return compute_c_incl();
5888 case CC_OP_INCL: return compute_c_incl();
5890 case CC_OP_DECB: return compute_c_incl();
5891 case CC_OP_DECW: return compute_c_incl();
5892 case CC_OP_DECL: return compute_c_incl();
5894 case CC_OP_SHLB: return compute_c_shlb();
5895 case CC_OP_SHLW: return compute_c_shlw();
5896 case CC_OP_SHLL: return compute_c_shll();
5898 case CC_OP_SARB: return compute_c_sarl();
5899 case CC_OP_SARW: return compute_c_sarl();
5900 case CC_OP_SARL: return compute_c_sarl();
5902 #ifdef TARGET_X86_64
5903 case CC_OP_MULQ: return compute_c_mull();
5905 case CC_OP_ADDQ: return compute_c_addq();
5907 case CC_OP_ADCQ: return compute_c_adcq();
5909 case CC_OP_SUBQ: return compute_c_subq();
5911 case CC_OP_SBBQ: return compute_c_sbbq();
5913 case CC_OP_LOGICQ: return compute_c_logicq();
5915 case CC_OP_INCQ: return compute_c_incl();
5917 case CC_OP_DECQ: return compute_c_incl();
5919 case CC_OP_SHLQ: return compute_c_shlq();
5921 case CC_OP_SARQ: return compute_c_sarl();
5922 #endif