target-i386/op_helper.c (qemu/stefanha.git)
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include <math.h>
21 #include "exec.h"
22 #include "host-utils.h"
23 #include "ioport.h"
25 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(env) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(env) do { } while (0)
35 #endif
38 #if 0
39 #define raise_exception_err(a, b)\
40 do {\
41 qemu_log("raise_exception line=%d\n", __LINE__);\
42 (raise_exception_err)(a, b);\
43 } while (0)
44 #endif
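/* parity flag lookup table: CC_P is set for bytes with an even number of 1 bits */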
46 static const uint8_t parity_table[256] = {
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81 /* modulo 17 table */
82 static const uint8_t rclw_table[32] = {
83 0, 1, 2, 3, 4, 5, 6, 7,
84 8, 9,10,11,12,13,14,15,
85 16, 0, 1, 2, 3, 4, 5, 6,
86 7, 8, 9,10,11,12,13,14,
89 /* modulo 9 table */
90 static const uint8_t rclb_table[32] = {
91 0, 1, 2, 3, 4, 5, 6, 7,
92 8, 0, 1, 2, 3, 4, 5, 6,
93 7, 8, 0, 1, 2, 3, 4, 5,
94 6, 7, 8, 0, 1, 2, 3, 4,
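/* x87 constants in 80-bit extended precision: log10(2), log2(e) and log2(10) */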
97 #define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
98 #define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
99 #define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
101 /* broken thread support */
103 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
105 void helper_lock(void)
107 spin_lock(&global_cpu_lock);
110 void helper_unlock(void)
112 spin_unlock(&global_cpu_lock);
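/* EFLAGS helpers: writes only touch the bits in update_mask; reads fold the
   lazily computed condition codes and DF back into the flags image */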
115 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
117 load_eflags(t0, update_mask);
120 target_ulong helper_read_eflags(void)
122 uint32_t eflags;
123 eflags = helper_cc_compute_all(CC_OP);
124 eflags |= (DF & DF_MASK);
125 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
126 return eflags;
 129 /* return non-zero on error */
130 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
131 int selector)
133 SegmentCache *dt;
134 int index;
135 target_ulong ptr;
137 if (selector & 0x4)
138 dt = &env->ldt;
139 else
140 dt = &env->gdt;
141 index = selector & ~7;
142 if ((index + 7) > dt->limit)
143 return -1;
144 ptr = dt->base + index;
145 *e1_ptr = ldl_kernel(ptr);
146 *e2_ptr = ldl_kernel(ptr + 4);
147 return 0;
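/* decode the limit and base fields of a segment descriptor; the G bit scales
   the limit to 4K-page granularity */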
150 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
152 unsigned int limit;
153 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
154 if (e2 & DESC_G_MASK)
155 limit = (limit << 12) | 0xfff;
156 return limit;
159 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
161 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
164 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
166 sc->base = get_seg_base(e1, e2);
167 sc->limit = get_seg_limit(e1, e2);
168 sc->flags = e2;
171 /* init the segment cache in vm86 mode. */
172 static inline void load_seg_vm(int seg, int selector)
174 selector &= 0xffff;
175 cpu_x86_load_seg_cache(env, seg, selector,
176 (selector << 4), 0xffff, 0);
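/* fetch the stack (SS:ESP) for the given privilege level from the current TSS */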
179 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
180 uint32_t *esp_ptr, int dpl)
182 int type, index, shift;
184 #if 0
186 int i;
187 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
188 for(i=0;i<env->tr.limit;i++) {
189 printf("%02x ", env->tr.base[i]);
190 if ((i & 7) == 7) printf("\n");
192 printf("\n");
194 #endif
196 if (!(env->tr.flags & DESC_P_MASK))
197 cpu_abort(env, "invalid tss");
198 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
199 if ((type & 7) != 1)
200 cpu_abort(env, "invalid tss type");
201 shift = type >> 3;
202 index = (dpl * 4 + 2) << shift;
203 if (index + (4 << shift) - 1 > env->tr.limit)
204 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
205 if (shift == 0) {
206 *esp_ptr = lduw_kernel(env->tr.base + index);
207 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
208 } else {
209 *esp_ptr = ldl_kernel(env->tr.base + index);
210 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
214 /* XXX: merge with load_seg() */
215 static void tss_load_seg(int seg_reg, int selector)
217 uint32_t e1, e2;
218 int rpl, dpl, cpl;
220 if ((selector & 0xfffc) != 0) {
221 if (load_segment(&e1, &e2, selector) != 0)
222 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
223 if (!(e2 & DESC_S_MASK))
224 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
225 rpl = selector & 3;
226 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
227 cpl = env->hflags & HF_CPL_MASK;
228 if (seg_reg == R_CS) {
229 if (!(e2 & DESC_CS_MASK))
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 231             /* XXX: is this correct? */
232 if (dpl != rpl)
233 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
234 if ((e2 & DESC_C_MASK) && dpl > rpl)
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 } else if (seg_reg == R_SS) {
237 /* SS must be writable data */
238 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
239 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
240 if (dpl != cpl || dpl != rpl)
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 } else {
243 /* not readable code */
244 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
245 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
 246             /* if data or non-conforming code, check the access rights */
247 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
248 if (dpl < cpl || dpl < rpl)
249 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
252 if (!(e2 & DESC_P_MASK))
253 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
254 cpu_x86_load_seg_cache(env, seg_reg, selector,
255 get_seg_base(e1, e2),
256 get_seg_limit(e1, e2),
257 e2);
258 } else {
259 if (seg_reg == R_SS || seg_reg == R_CS)
260 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
264 #define SWITCH_TSS_JMP 0
265 #define SWITCH_TSS_IRET 1
266 #define SWITCH_TSS_CALL 2
268 /* XXX: restore CPU state in registers (PowerPC case) */
269 static void switch_tss(int tss_selector,
270 uint32_t e1, uint32_t e2, int source,
271 uint32_t next_eip)
273 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
274 target_ulong tss_base;
275 uint32_t new_regs[8], new_segs[6];
276 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
277 uint32_t old_eflags, eflags_mask;
278 SegmentCache *dt;
279 int index;
280 target_ulong ptr;
282 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
283 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
285 /* if task gate, we read the TSS segment and we load it */
286 if (type == 5) {
287 if (!(e2 & DESC_P_MASK))
288 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
289 tss_selector = e1 >> 16;
290 if (tss_selector & 4)
291 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
292 if (load_segment(&e1, &e2, tss_selector) != 0)
293 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
294 if (e2 & DESC_S_MASK)
295 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
296 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
297 if ((type & 7) != 1)
298 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301 if (!(e2 & DESC_P_MASK))
302 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
304 if (type & 8)
305 tss_limit_max = 103;
306 else
307 tss_limit_max = 43;
308 tss_limit = get_seg_limit(e1, e2);
309 tss_base = get_seg_base(e1, e2);
310 if ((tss_selector & 4) != 0 ||
311 tss_limit < tss_limit_max)
312 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
313 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
314 if (old_type & 8)
315 old_tss_limit_max = 103;
316 else
317 old_tss_limit_max = 43;
319 /* read all the registers from the new TSS */
320 if (type & 8) {
321 /* 32 bit */
322 new_cr3 = ldl_kernel(tss_base + 0x1c);
323 new_eip = ldl_kernel(tss_base + 0x20);
324 new_eflags = ldl_kernel(tss_base + 0x24);
325 for(i = 0; i < 8; i++)
326 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
327 for(i = 0; i < 6; i++)
328 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
329 new_ldt = lduw_kernel(tss_base + 0x60);
330 new_trap = ldl_kernel(tss_base + 0x64);
331 } else {
332 /* 16 bit */
333 new_cr3 = 0;
334 new_eip = lduw_kernel(tss_base + 0x0e);
335 new_eflags = lduw_kernel(tss_base + 0x10);
336 for(i = 0; i < 8; i++)
337 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
338 for(i = 0; i < 4; i++)
339 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
340 new_ldt = lduw_kernel(tss_base + 0x2a);
341 new_segs[R_FS] = 0;
342 new_segs[R_GS] = 0;
343 new_trap = 0;
345 /* XXX: avoid a compiler warning, see
346 http://support.amd.com/us/Processor_TechDocs/24593.pdf
347 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
348 (void)new_trap;
350 /* NOTE: we must avoid memory exceptions during the task switch,
351 so we make dummy accesses before */
352 /* XXX: it can still fail in some cases, so a bigger hack is
 353        necessary to validate the TLB after having done the accesses */
355 v1 = ldub_kernel(env->tr.base);
356 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
357 stb_kernel(env->tr.base, v1);
358 stb_kernel(env->tr.base + old_tss_limit_max, v2);
360 /* clear busy bit (it is restartable) */
361 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
362 target_ulong ptr;
363 uint32_t e2;
364 ptr = env->gdt.base + (env->tr.selector & ~7);
365 e2 = ldl_kernel(ptr + 4);
366 e2 &= ~DESC_TSS_BUSY_MASK;
367 stl_kernel(ptr + 4, e2);
369 old_eflags = compute_eflags();
370 if (source == SWITCH_TSS_IRET)
371 old_eflags &= ~NT_MASK;
373 /* save the current state in the old TSS */
374 if (type & 8) {
375 /* 32 bit */
376 stl_kernel(env->tr.base + 0x20, next_eip);
377 stl_kernel(env->tr.base + 0x24, old_eflags);
378 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
379 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
380 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
381 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
382 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
383 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
384 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
385 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
386 for(i = 0; i < 6; i++)
387 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
388 } else {
389 /* 16 bit */
390 stw_kernel(env->tr.base + 0x0e, next_eip);
391 stw_kernel(env->tr.base + 0x10, old_eflags);
392 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
393 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
394 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
395 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
396 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
397 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
398 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
399 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
400 for(i = 0; i < 4; i++)
401 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
 404     /* now if an exception occurs, it will occur in the next task
405 context */
407 if (source == SWITCH_TSS_CALL) {
408 stw_kernel(tss_base, env->tr.selector);
409 new_eflags |= NT_MASK;
412 /* set busy bit */
413 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
414 target_ulong ptr;
415 uint32_t e2;
416 ptr = env->gdt.base + (tss_selector & ~7);
417 e2 = ldl_kernel(ptr + 4);
418 e2 |= DESC_TSS_BUSY_MASK;
419 stl_kernel(ptr + 4, e2);
422 /* set the new CPU state */
423 /* from this point, any exception which occurs can give problems */
424 env->cr[0] |= CR0_TS_MASK;
425 env->hflags |= HF_TS_MASK;
426 env->tr.selector = tss_selector;
427 env->tr.base = tss_base;
428 env->tr.limit = tss_limit;
429 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
431 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
432 cpu_x86_update_cr3(env, new_cr3);
435 /* load all registers without an exception, then reload them with
 436        a possible exception */
437 env->eip = new_eip;
438 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
439 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
440 if (!(type & 8))
441 eflags_mask &= 0xffff;
442 load_eflags(new_eflags, eflags_mask);
443 /* XXX: what to do in 16 bit case ? */
444 EAX = new_regs[0];
445 ECX = new_regs[1];
446 EDX = new_regs[2];
447 EBX = new_regs[3];
448 ESP = new_regs[4];
449 EBP = new_regs[5];
450 ESI = new_regs[6];
451 EDI = new_regs[7];
452 if (new_eflags & VM_MASK) {
453 for(i = 0; i < 6; i++)
454 load_seg_vm(i, new_segs[i]);
455 /* in vm86, CPL is always 3 */
456 cpu_x86_set_cpl(env, 3);
457 } else {
 458         /* CPL is set to the RPL of CS */
459 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
460 /* first just selectors as the rest may trigger exceptions */
461 for(i = 0; i < 6; i++)
462 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
465 env->ldt.selector = new_ldt & ~4;
466 env->ldt.base = 0;
467 env->ldt.limit = 0;
468 env->ldt.flags = 0;
470 /* load the LDT */
471 if (new_ldt & 4)
472 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474 if ((new_ldt & 0xfffc) != 0) {
475 dt = &env->gdt;
476 index = new_ldt & ~7;
477 if ((index + 7) > dt->limit)
478 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
479 ptr = dt->base + index;
480 e1 = ldl_kernel(ptr);
481 e2 = ldl_kernel(ptr + 4);
482 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
483 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
484 if (!(e2 & DESC_P_MASK))
485 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
486 load_seg_cache_raw_dt(&env->ldt, e1, e2);
489 /* load the segments */
490 if (!(new_eflags & VM_MASK)) {
491 tss_load_seg(R_CS, new_segs[R_CS]);
492 tss_load_seg(R_SS, new_segs[R_SS]);
493 tss_load_seg(R_ES, new_segs[R_ES]);
494 tss_load_seg(R_DS, new_segs[R_DS]);
495 tss_load_seg(R_FS, new_segs[R_FS]);
496 tss_load_seg(R_GS, new_segs[R_GS]);
499 /* check that EIP is in the CS segment limits */
500 if (new_eip > env->segs[R_CS].limit) {
501 /* XXX: different exception if CALL ? */
502 raise_exception_err(EXCP0D_GPF, 0);
505 #ifndef CONFIG_USER_ONLY
506 /* reset local breakpoints */
507 if (env->dr[7] & 0x55) {
508 for (i = 0; i < 4; i++) {
509 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
510 hw_breakpoint_remove(env, i);
512 env->dr[7] &= ~0x55;
514 #endif
517 /* check if Port I/O is allowed in TSS */
518 static inline void check_io(int addr, int size)
520 int io_offset, val, mask;
522 /* TSS must be a valid 32 bit one */
523 if (!(env->tr.flags & DESC_P_MASK) ||
524 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
525 env->tr.limit < 103)
526 goto fail;
527 io_offset = lduw_kernel(env->tr.base + 0x66);
528 io_offset += (addr >> 3);
529 /* Note: the check needs two bytes */
530 if ((io_offset + 1) > env->tr.limit)
531 goto fail;
532 val = lduw_kernel(env->tr.base + io_offset);
533 val >>= (addr & 7);
534 mask = (1 << size) - 1;
535 /* all bits must be zero to allow the I/O */
536 if ((val & mask) != 0) {
537 fail:
538 raise_exception_err(EXCP0D_GPF, 0);
542 void helper_check_iob(uint32_t t0)
544 check_io(t0, 1);
547 void helper_check_iow(uint32_t t0)
549 check_io(t0, 2);
552 void helper_check_iol(uint32_t t0)
554 check_io(t0, 4);
557 void helper_outb(uint32_t port, uint32_t data)
559 cpu_outb(port, data & 0xff);
562 target_ulong helper_inb(uint32_t port)
564 return cpu_inb(port);
567 void helper_outw(uint32_t port, uint32_t data)
569 cpu_outw(port, data & 0xffff);
572 target_ulong helper_inw(uint32_t port)
574 return cpu_inw(port);
577 void helper_outl(uint32_t port, uint32_t data)
579 cpu_outl(port, data);
582 target_ulong helper_inl(uint32_t port)
584 return cpu_inl(port);
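/* stack pointer mask: 32-bit stack if the SS descriptor's B bit is set, 16-bit otherwise */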
587 static inline unsigned int get_sp_mask(unsigned int e2)
589 if (e2 & DESC_B_MASK)
590 return 0xffffffff;
591 else
592 return 0xffff;
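/* exceptions that push an error code: #DF, #TS, #NP, #SS, #GP, #PF and #AC */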
 595 static int exception_has_error_code(int intno)
597 switch(intno) {
598 case 8:
599 case 10:
600 case 11:
601 case 12:
602 case 13:
603 case 14:
604 case 17:
605 return 1;
607 return 0;
610 #ifdef TARGET_X86_64
611 #define SET_ESP(val, sp_mask)\
612 do {\
613 if ((sp_mask) == 0xffff)\
614 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
615 else if ((sp_mask) == 0xffffffffLL)\
616 ESP = (uint32_t)(val);\
617 else\
618 ESP = (val);\
619 } while (0)
620 #else
621 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
622 #endif
 624 /* on 64-bit machines this addition can overflow, so this segment addition macro
 625  * can be used to trim the value to 32 bits whenever needed */
626 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
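/* raw stack push/pop primitives: ssp is the SS segment base, sp is masked with
   sp_mask on every access */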
 628 /* XXX: add an is_user flag to have proper security support */
629 #define PUSHW(ssp, sp, sp_mask, val)\
631 sp -= 2;\
632 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
635 #define PUSHL(ssp, sp, sp_mask, val)\
637 sp -= 4;\
638 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
641 #define POPW(ssp, sp, sp_mask, val)\
643 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
644 sp += 2;\
647 #define POPL(ssp, sp, sp_mask, val)\
649 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
650 sp += 4;\
653 /* protected mode interrupt */
654 static void do_interrupt_protected(int intno, int is_int, int error_code,
655 unsigned int next_eip, int is_hw)
657 SegmentCache *dt;
658 target_ulong ptr, ssp;
659 int type, dpl, selector, ss_dpl, cpl;
660 int has_error_code, new_stack, shift;
661 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
662 uint32_t old_eip, sp_mask;
664 has_error_code = 0;
665 if (!is_int && !is_hw)
 666         has_error_code = exception_has_error_code(intno);
667 if (is_int)
668 old_eip = next_eip;
669 else
670 old_eip = env->eip;
672 dt = &env->idt;
673 if (intno * 8 + 7 > dt->limit)
674 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
675 ptr = dt->base + intno * 8;
676 e1 = ldl_kernel(ptr);
677 e2 = ldl_kernel(ptr + 4);
678 /* check gate type */
679 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
680 switch(type) {
681 case 5: /* task gate */
682 /* must do that check here to return the correct error code */
683 if (!(e2 & DESC_P_MASK))
684 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
685 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
686 if (has_error_code) {
687 int type;
688 uint32_t mask;
689 /* push the error code */
690 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
691 shift = type >> 3;
692 if (env->segs[R_SS].flags & DESC_B_MASK)
693 mask = 0xffffffff;
694 else
695 mask = 0xffff;
696 esp = (ESP - (2 << shift)) & mask;
697 ssp = env->segs[R_SS].base + esp;
698 if (shift)
699 stl_kernel(ssp, error_code);
700 else
701 stw_kernel(ssp, error_code);
702 SET_ESP(esp, mask);
704 return;
705 case 6: /* 286 interrupt gate */
706 case 7: /* 286 trap gate */
707 case 14: /* 386 interrupt gate */
708 case 15: /* 386 trap gate */
709 break;
710 default:
711 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
712 break;
714 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
715 cpl = env->hflags & HF_CPL_MASK;
716 /* check privilege if software int */
717 if (is_int && dpl < cpl)
718 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
719 /* check valid bit */
720 if (!(e2 & DESC_P_MASK))
721 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
722 selector = e1 >> 16;
723 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
724 if ((selector & 0xfffc) == 0)
725 raise_exception_err(EXCP0D_GPF, 0);
727 if (load_segment(&e1, &e2, selector) != 0)
728 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
729 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
730 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
731 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
732 if (dpl > cpl)
733 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
734 if (!(e2 & DESC_P_MASK))
735 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
736 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
737 /* to inner privilege */
738 get_ss_esp_from_tss(&ss, &esp, dpl);
739 if ((ss & 0xfffc) == 0)
740 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
741 if ((ss & 3) != dpl)
742 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
743 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
744 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
745 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
746 if (ss_dpl != dpl)
747 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
748 if (!(ss_e2 & DESC_S_MASK) ||
749 (ss_e2 & DESC_CS_MASK) ||
750 !(ss_e2 & DESC_W_MASK))
751 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
752 if (!(ss_e2 & DESC_P_MASK))
753 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
754 new_stack = 1;
755 sp_mask = get_sp_mask(ss_e2);
756 ssp = get_seg_base(ss_e1, ss_e2);
757 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
758 /* to same privilege */
759 if (env->eflags & VM_MASK)
760 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
761 new_stack = 0;
762 sp_mask = get_sp_mask(env->segs[R_SS].flags);
763 ssp = env->segs[R_SS].base;
764 esp = ESP;
765 dpl = cpl;
766 } else {
767 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
768 new_stack = 0; /* avoid warning */
769 sp_mask = 0; /* avoid warning */
770 ssp = 0; /* avoid warning */
771 esp = 0; /* avoid warning */
774 shift = type >> 3;
776 #if 0
777 /* XXX: check that enough room is available */
778 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
779 if (env->eflags & VM_MASK)
780 push_size += 8;
781 push_size <<= shift;
782 #endif
783 if (shift == 1) {
784 if (new_stack) {
785 if (env->eflags & VM_MASK) {
786 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
787 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
788 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
789 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
791 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
792 PUSHL(ssp, esp, sp_mask, ESP);
794 PUSHL(ssp, esp, sp_mask, compute_eflags());
795 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
796 PUSHL(ssp, esp, sp_mask, old_eip);
797 if (has_error_code) {
798 PUSHL(ssp, esp, sp_mask, error_code);
800 } else {
801 if (new_stack) {
802 if (env->eflags & VM_MASK) {
803 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
804 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
805 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
806 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
808 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
809 PUSHW(ssp, esp, sp_mask, ESP);
811 PUSHW(ssp, esp, sp_mask, compute_eflags());
812 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
813 PUSHW(ssp, esp, sp_mask, old_eip);
814 if (has_error_code) {
815 PUSHW(ssp, esp, sp_mask, error_code);
819 if (new_stack) {
820 if (env->eflags & VM_MASK) {
821 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
822 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
823 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
824 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
826 ss = (ss & ~3) | dpl;
827 cpu_x86_load_seg_cache(env, R_SS, ss,
828 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
830 SET_ESP(esp, sp_mask);
832 selector = (selector & ~3) | dpl;
833 cpu_x86_load_seg_cache(env, R_CS, selector,
834 get_seg_base(e1, e2),
835 get_seg_limit(e1, e2),
836 e2);
837 cpu_x86_set_cpl(env, dpl);
838 env->eip = offset;
 840     /* interrupt gates clear the IF flag */
841 if ((type & 1) == 0) {
842 env->eflags &= ~IF_MASK;
844 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
847 #ifdef TARGET_X86_64
849 #define PUSHQ(sp, val)\
851 sp -= 8;\
852 stq_kernel(sp, (val));\
855 #define POPQ(sp, val)\
857 val = ldq_kernel(sp);\
858 sp += 8;\
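/* read a stack pointer from the 64-bit TSS: callers pass dpl (RSP0-RSP2) or
   ist + 3 (IST1-IST7) */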
861 static inline target_ulong get_rsp_from_tss(int level)
863 int index;
865 #if 0
866 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
867 env->tr.base, env->tr.limit);
868 #endif
870 if (!(env->tr.flags & DESC_P_MASK))
871 cpu_abort(env, "invalid tss");
872 index = 8 * level + 4;
873 if ((index + 7) > env->tr.limit)
874 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
875 return ldq_kernel(env->tr.base + index);
878 /* 64 bit interrupt */
879 static void do_interrupt64(int intno, int is_int, int error_code,
880 target_ulong next_eip, int is_hw)
882 SegmentCache *dt;
883 target_ulong ptr;
884 int type, dpl, selector, cpl, ist;
885 int has_error_code, new_stack;
886 uint32_t e1, e2, e3, ss;
887 target_ulong old_eip, esp, offset;
889 has_error_code = 0;
890 if (!is_int && !is_hw)
 891         has_error_code = exception_has_error_code(intno);
892 if (is_int)
893 old_eip = next_eip;
894 else
895 old_eip = env->eip;
897 dt = &env->idt;
898 if (intno * 16 + 15 > dt->limit)
899 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
900 ptr = dt->base + intno * 16;
901 e1 = ldl_kernel(ptr);
902 e2 = ldl_kernel(ptr + 4);
903 e3 = ldl_kernel(ptr + 8);
904 /* check gate type */
905 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
906 switch(type) {
907 case 14: /* 386 interrupt gate */
908 case 15: /* 386 trap gate */
909 break;
910 default:
911 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
912 break;
914 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
915 cpl = env->hflags & HF_CPL_MASK;
916 /* check privilege if software int */
917 if (is_int && dpl < cpl)
918 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
919 /* check valid bit */
920 if (!(e2 & DESC_P_MASK))
921 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
922 selector = e1 >> 16;
923 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
924 ist = e2 & 7;
925 if ((selector & 0xfffc) == 0)
926 raise_exception_err(EXCP0D_GPF, 0);
928 if (load_segment(&e1, &e2, selector) != 0)
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
930 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
931 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
933 if (dpl > cpl)
934 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
935 if (!(e2 & DESC_P_MASK))
936 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
937 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
940 /* to inner privilege */
941 if (ist != 0)
942 esp = get_rsp_from_tss(ist + 3);
943 else
944 esp = get_rsp_from_tss(dpl);
945 esp &= ~0xfLL; /* align stack */
946 ss = 0;
947 new_stack = 1;
948 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
949 /* to same privilege */
950 if (env->eflags & VM_MASK)
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0;
953 if (ist != 0)
954 esp = get_rsp_from_tss(ist + 3);
955 else
956 esp = ESP;
957 esp &= ~0xfLL; /* align stack */
958 dpl = cpl;
959 } else {
960 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
961 new_stack = 0; /* avoid warning */
962 esp = 0; /* avoid warning */
965 PUSHQ(esp, env->segs[R_SS].selector);
966 PUSHQ(esp, ESP);
967 PUSHQ(esp, compute_eflags());
968 PUSHQ(esp, env->segs[R_CS].selector);
969 PUSHQ(esp, old_eip);
970 if (has_error_code) {
971 PUSHQ(esp, error_code);
974 if (new_stack) {
975 ss = 0 | dpl;
976 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
978 ESP = esp;
980 selector = (selector & ~3) | dpl;
981 cpu_x86_load_seg_cache(env, R_CS, selector,
982 get_seg_base(e1, e2),
983 get_seg_limit(e1, e2),
984 e2);
985 cpu_x86_set_cpl(env, dpl);
986 env->eip = offset;
 988     /* interrupt gates clear the IF flag */
989 if ((type & 1) == 0) {
990 env->eflags &= ~IF_MASK;
992 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
994 #endif
996 #ifdef TARGET_X86_64
997 #if defined(CONFIG_USER_ONLY)
998 void helper_syscall(int next_eip_addend)
1000 env->exception_index = EXCP_SYSCALL;
1001 env->exception_next_eip = env->eip + next_eip_addend;
1002 cpu_loop_exit(env);
1004 #else
1005 void helper_syscall(int next_eip_addend)
1007 int selector;
1009 if (!(env->efer & MSR_EFER_SCE)) {
1010 raise_exception_err(EXCP06_ILLOP, 0);
1012 selector = (env->star >> 32) & 0xffff;
1013 if (env->hflags & HF_LMA_MASK) {
1014 int code64;
1016 ECX = env->eip + next_eip_addend;
1017 env->regs[11] = compute_eflags();
1019 code64 = env->hflags & HF_CS64_MASK;
1021 cpu_x86_set_cpl(env, 0);
1022 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1023 0, 0xffffffff,
1024 DESC_G_MASK | DESC_P_MASK |
1025 DESC_S_MASK |
1026 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1027 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1028 0, 0xffffffff,
1029 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1030 DESC_S_MASK |
1031 DESC_W_MASK | DESC_A_MASK);
1032 env->eflags &= ~env->fmask;
1033 load_eflags(env->eflags, 0);
1034 if (code64)
1035 env->eip = env->lstar;
1036 else
1037 env->eip = env->cstar;
1038 } else {
1039 ECX = (uint32_t)(env->eip + next_eip_addend);
1041 cpu_x86_set_cpl(env, 0);
1042 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1043 0, 0xffffffff,
1044 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1045 DESC_S_MASK |
1046 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1047 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1048 0, 0xffffffff,
1049 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1050 DESC_S_MASK |
1051 DESC_W_MASK | DESC_A_MASK);
1052 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1053 env->eip = (uint32_t)env->star;
1056 #endif
1057 #endif
1059 #ifdef TARGET_X86_64
1060 void helper_sysret(int dflag)
1062 int cpl, selector;
1064 if (!(env->efer & MSR_EFER_SCE)) {
1065 raise_exception_err(EXCP06_ILLOP, 0);
1067 cpl = env->hflags & HF_CPL_MASK;
1068 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1069 raise_exception_err(EXCP0D_GPF, 0);
1071 selector = (env->star >> 48) & 0xffff;
1072 if (env->hflags & HF_LMA_MASK) {
1073 if (dflag == 2) {
1074 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1075 0, 0xffffffff,
1076 DESC_G_MASK | DESC_P_MASK |
1077 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1079 DESC_L_MASK);
1080 env->eip = ECX;
1081 } else {
1082 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1087 env->eip = (uint32_t)ECX;
1089 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1090 0, 0xffffffff,
1091 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1092 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1093 DESC_W_MASK | DESC_A_MASK);
1094 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1095 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1096 cpu_x86_set_cpl(env, 3);
1097 } else {
1098 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1099 0, 0xffffffff,
1100 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1101 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1102 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1103 env->eip = (uint32_t)ECX;
1104 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1105 0, 0xffffffff,
1106 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1107 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1108 DESC_W_MASK | DESC_A_MASK);
1109 env->eflags |= IF_MASK;
1110 cpu_x86_set_cpl(env, 3);
1113 #endif
1115 /* real mode interrupt */
1116 static void do_interrupt_real(int intno, int is_int, int error_code,
1117 unsigned int next_eip)
1119 SegmentCache *dt;
1120 target_ulong ptr, ssp;
1121 int selector;
1122 uint32_t offset, esp;
1123 uint32_t old_cs, old_eip;
1125 /* real mode (simpler !) */
1126 dt = &env->idt;
1127 if (intno * 4 + 3 > dt->limit)
1128 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1129 ptr = dt->base + intno * 4;
1130 offset = lduw_kernel(ptr);
1131 selector = lduw_kernel(ptr + 2);
1132 esp = ESP;
1133 ssp = env->segs[R_SS].base;
1134 if (is_int)
1135 old_eip = next_eip;
1136 else
1137 old_eip = env->eip;
1138 old_cs = env->segs[R_CS].selector;
1139 /* XXX: use SS segment size ? */
1140 PUSHW(ssp, esp, 0xffff, compute_eflags());
1141 PUSHW(ssp, esp, 0xffff, old_cs);
1142 PUSHW(ssp, esp, 0xffff, old_eip);
1144 /* update processor state */
1145 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1146 env->eip = offset;
1147 env->segs[R_CS].selector = selector;
1148 env->segs[R_CS].base = (selector << 4);
1149 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1152 #if defined(CONFIG_USER_ONLY)
1153 /* fake user mode interrupt */
1154 static void do_interrupt_user(int intno, int is_int, int error_code,
1155 target_ulong next_eip)
1157 SegmentCache *dt;
1158 target_ulong ptr;
1159 int dpl, cpl, shift;
1160 uint32_t e2;
1162 dt = &env->idt;
1163 if (env->hflags & HF_LMA_MASK) {
1164 shift = 4;
1165 } else {
1166 shift = 3;
1168 ptr = dt->base + (intno << shift);
1169 e2 = ldl_kernel(ptr + 4);
1171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1172 cpl = env->hflags & HF_CPL_MASK;
1173 /* check privilege if software int */
1174 if (is_int && dpl < cpl)
1175 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1177 /* Since we emulate only user space, we cannot do more than
1178 exiting the emulation with the suitable exception and error
1179 code */
1180 if (is_int)
1181 EIP = next_eip;
1184 #else
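/* SVM: record the event being delivered in the VMCB's control.event_inj field
   (with its error code if applicable) unless an injection is already pending */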
1186 static void handle_even_inj(int intno, int is_int, int error_code,
1187 int is_hw, int rm)
1189 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1190 if (!(event_inj & SVM_EVTINJ_VALID)) {
1191 int type;
1192 if (is_int)
1193 type = SVM_EVTINJ_TYPE_SOFT;
1194 else
1195 type = SVM_EVTINJ_TYPE_EXEPT;
1196 event_inj = intno | type | SVM_EVTINJ_VALID;
 1197         if (!rm && exception_has_error_code(intno)) {
1198 event_inj |= SVM_EVTINJ_VALID_ERR;
1199 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1201 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1204 #endif
 1207  * Begin execution of an interrupt. is_int is TRUE if coming from
1208 * the int instruction. next_eip is the EIP value AFTER the interrupt
1209 * instruction. It is only relevant if is_int is TRUE.
1211 static void do_interrupt_all(int intno, int is_int, int error_code,
1212 target_ulong next_eip, int is_hw)
1214 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1215 if ((env->cr[0] & CR0_PE_MASK)) {
1216 static int count;
1217 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1218 count, intno, error_code, is_int,
1219 env->hflags & HF_CPL_MASK,
1220 env->segs[R_CS].selector, EIP,
1221 (int)env->segs[R_CS].base + EIP,
1222 env->segs[R_SS].selector, ESP);
1223 if (intno == 0x0e) {
1224 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1225 } else {
1226 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1228 qemu_log("\n");
1229 log_cpu_state(env, X86_DUMP_CCOP);
1230 #if 0
1232 int i;
1233 target_ulong ptr;
1234 qemu_log(" code=");
1235 ptr = env->segs[R_CS].base + env->eip;
1236 for(i = 0; i < 16; i++) {
1237 qemu_log(" %02x", ldub(ptr + i));
1239 qemu_log("\n");
1241 #endif
1242 count++;
1245 if (env->cr[0] & CR0_PE_MASK) {
1246 #if !defined(CONFIG_USER_ONLY)
1247 if (env->hflags & HF_SVMI_MASK)
1248 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1249 #endif
1250 #ifdef TARGET_X86_64
1251 if (env->hflags & HF_LMA_MASK) {
1252 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1253 } else
1254 #endif
1256 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1258 } else {
1259 #if !defined(CONFIG_USER_ONLY)
1260 if (env->hflags & HF_SVMI_MASK)
1261 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1262 #endif
1263 do_interrupt_real(intno, is_int, error_code, next_eip);
1266 #if !defined(CONFIG_USER_ONLY)
1267 if (env->hflags & HF_SVMI_MASK) {
1268 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1269 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1271 #endif
1274 void do_interrupt(CPUState *env1)
1276 CPUState *saved_env;
1278 saved_env = env;
1279 env = env1;
1280 #if defined(CONFIG_USER_ONLY)
1281 /* if user mode only, we simulate a fake exception
1282 which will be handled outside the cpu execution
1283 loop */
1284 do_interrupt_user(env->exception_index,
1285 env->exception_is_int,
1286 env->error_code,
1287 env->exception_next_eip);
1288 /* successfully delivered */
1289 env->old_exception = -1;
1290 #else
1291 /* simulate a real cpu exception. On i386, it can
1292 trigger new exceptions, but we do not handle
1293 double or triple faults yet. */
1294 do_interrupt_all(env->exception_index,
1295 env->exception_is_int,
1296 env->error_code,
1297 env->exception_next_eip, 0);
1298 /* successfully delivered */
1299 env->old_exception = -1;
1300 #endif
1301 env = saved_env;
1304 void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
1306 CPUState *saved_env;
1308 saved_env = env;
1309 env = env1;
1310 do_interrupt_all(intno, 0, 0, 0, is_hw);
1311 env = saved_env;
1314 /* This should come from sysemu.h - if we could include it here... */
1315 void qemu_system_reset_request(void);
1318 * Check nested exceptions and change to double or triple fault if
 1319  * needed. It should only be called if this is not an interrupt.
1320 * Returns the new exception number.
1322 static int check_exception(int intno, int *error_code)
1324 int first_contributory = env->old_exception == 0 ||
1325 (env->old_exception >= 10 &&
1326 env->old_exception <= 13);
1327 int second_contributory = intno == 0 ||
1328 (intno >= 10 && intno <= 13);
1330 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1331 env->old_exception, intno);
1333 #if !defined(CONFIG_USER_ONLY)
1334 if (env->old_exception == EXCP08_DBLE) {
1335 if (env->hflags & HF_SVMI_MASK)
1336 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1338 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1340 qemu_system_reset_request();
1341 return EXCP_HLT;
1343 #endif
1345 if ((first_contributory && second_contributory)
1346 || (env->old_exception == EXCP0E_PAGE &&
1347 (second_contributory || (intno == EXCP0E_PAGE)))) {
1348 intno = EXCP08_DBLE;
1349 *error_code = 0;
1352 if (second_contributory || (intno == EXCP0E_PAGE) ||
1353 (intno == EXCP08_DBLE))
1354 env->old_exception = intno;
1356 return intno;
 1360  * Signal an interrupt. It is executed in the main CPU loop.
1361 * is_int is TRUE if coming from the int instruction. next_eip is the
1362 * EIP value AFTER the interrupt instruction. It is only relevant if
1363 * is_int is TRUE.
1365 static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1366 int next_eip_addend)
1368 if (!is_int) {
1369 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1370 intno = check_exception(intno, &error_code);
1371 } else {
1372 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1375 env->exception_index = intno;
1376 env->error_code = error_code;
1377 env->exception_is_int = is_int;
1378 env->exception_next_eip = env->eip + next_eip_addend;
1379 cpu_loop_exit(env);
1382 /* shortcuts to generate exceptions */
1384 void raise_exception_err(int exception_index, int error_code)
1386 raise_interrupt(exception_index, 0, error_code, 0);
1389 void raise_exception(int exception_index)
1391 raise_interrupt(exception_index, 0, 0, 0);
1394 void raise_exception_env(int exception_index, CPUState *nenv)
1396 env = nenv;
1397 raise_exception(exception_index);
1399 /* SMM support */
1401 #if defined(CONFIG_USER_ONLY)
1403 void do_smm_enter(CPUState *env1)
1407 void helper_rsm(void)
1411 #else
1413 #ifdef TARGET_X86_64
1414 #define SMM_REVISION_ID 0x00020064
1415 #else
1416 #define SMM_REVISION_ID 0x00020000
1417 #endif
1419 void do_smm_enter(CPUState *env1)
1421 target_ulong sm_state;
1422 SegmentCache *dt;
1423 int i, offset;
1424 CPUState *saved_env;
1426 saved_env = env;
1427 env = env1;
1429 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1430 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1432 env->hflags |= HF_SMM_MASK;
1433 cpu_smm_update(env);
1435 sm_state = env->smbase + 0x8000;
1437 #ifdef TARGET_X86_64
1438 for(i = 0; i < 6; i++) {
1439 dt = &env->segs[i];
1440 offset = 0x7e00 + i * 16;
1441 stw_phys(sm_state + offset, dt->selector);
1442 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1443 stl_phys(sm_state + offset + 4, dt->limit);
1444 stq_phys(sm_state + offset + 8, dt->base);
1447 stq_phys(sm_state + 0x7e68, env->gdt.base);
1448 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1450 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1451 stq_phys(sm_state + 0x7e78, env->ldt.base);
1452 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1453 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1455 stq_phys(sm_state + 0x7e88, env->idt.base);
1456 stl_phys(sm_state + 0x7e84, env->idt.limit);
1458 stw_phys(sm_state + 0x7e90, env->tr.selector);
1459 stq_phys(sm_state + 0x7e98, env->tr.base);
1460 stl_phys(sm_state + 0x7e94, env->tr.limit);
1461 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1463 stq_phys(sm_state + 0x7ed0, env->efer);
1465 stq_phys(sm_state + 0x7ff8, EAX);
1466 stq_phys(sm_state + 0x7ff0, ECX);
1467 stq_phys(sm_state + 0x7fe8, EDX);
1468 stq_phys(sm_state + 0x7fe0, EBX);
1469 stq_phys(sm_state + 0x7fd8, ESP);
1470 stq_phys(sm_state + 0x7fd0, EBP);
1471 stq_phys(sm_state + 0x7fc8, ESI);
1472 stq_phys(sm_state + 0x7fc0, EDI);
1473 for(i = 8; i < 16; i++)
1474 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1475 stq_phys(sm_state + 0x7f78, env->eip);
1476 stl_phys(sm_state + 0x7f70, compute_eflags());
1477 stl_phys(sm_state + 0x7f68, env->dr[6]);
1478 stl_phys(sm_state + 0x7f60, env->dr[7]);
1480 stl_phys(sm_state + 0x7f48, env->cr[4]);
1481 stl_phys(sm_state + 0x7f50, env->cr[3]);
1482 stl_phys(sm_state + 0x7f58, env->cr[0]);
1484 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1485 stl_phys(sm_state + 0x7f00, env->smbase);
1486 #else
1487 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1488 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1489 stl_phys(sm_state + 0x7ff4, compute_eflags());
1490 stl_phys(sm_state + 0x7ff0, env->eip);
1491 stl_phys(sm_state + 0x7fec, EDI);
1492 stl_phys(sm_state + 0x7fe8, ESI);
1493 stl_phys(sm_state + 0x7fe4, EBP);
1494 stl_phys(sm_state + 0x7fe0, ESP);
1495 stl_phys(sm_state + 0x7fdc, EBX);
1496 stl_phys(sm_state + 0x7fd8, EDX);
1497 stl_phys(sm_state + 0x7fd4, ECX);
1498 stl_phys(sm_state + 0x7fd0, EAX);
1499 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1500 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1502 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1503 stl_phys(sm_state + 0x7f64, env->tr.base);
1504 stl_phys(sm_state + 0x7f60, env->tr.limit);
1505 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1507 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1508 stl_phys(sm_state + 0x7f80, env->ldt.base);
1509 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1510 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1512 stl_phys(sm_state + 0x7f74, env->gdt.base);
1513 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1515 stl_phys(sm_state + 0x7f58, env->idt.base);
1516 stl_phys(sm_state + 0x7f54, env->idt.limit);
1518 for(i = 0; i < 6; i++) {
1519 dt = &env->segs[i];
1520 if (i < 3)
1521 offset = 0x7f84 + i * 12;
1522 else
1523 offset = 0x7f2c + (i - 3) * 12;
1524 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1525 stl_phys(sm_state + offset + 8, dt->base);
1526 stl_phys(sm_state + offset + 4, dt->limit);
1527 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1529 stl_phys(sm_state + 0x7f14, env->cr[4]);
1531 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1532 stl_phys(sm_state + 0x7ef8, env->smbase);
1533 #endif
1534 /* init SMM cpu state */
1536 #ifdef TARGET_X86_64
1537 cpu_load_efer(env, 0);
1538 #endif
1539 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1540 env->eip = 0x00008000;
1541 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1542 0xffffffff, 0);
1543 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1544 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1545 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1546 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1547 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1549 cpu_x86_update_cr0(env,
1550 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1551 cpu_x86_update_cr4(env, 0);
1552 env->dr[7] = 0x00000400;
1553 CC_OP = CC_OP_EFLAGS;
1554 env = saved_env;
1557 void helper_rsm(void)
1559 target_ulong sm_state;
1560 int i, offset;
1561 uint32_t val;
1563 sm_state = env->smbase + 0x8000;
1564 #ifdef TARGET_X86_64
1565 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1567 for(i = 0; i < 6; i++) {
1568 offset = 0x7e00 + i * 16;
1569 cpu_x86_load_seg_cache(env, i,
1570 lduw_phys(sm_state + offset),
1571 ldq_phys(sm_state + offset + 8),
1572 ldl_phys(sm_state + offset + 4),
1573 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1576 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1577 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1579 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1580 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1581 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1582 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1584 env->idt.base = ldq_phys(sm_state + 0x7e88);
1585 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1587 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1588 env->tr.base = ldq_phys(sm_state + 0x7e98);
1589 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1590 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1592 EAX = ldq_phys(sm_state + 0x7ff8);
1593 ECX = ldq_phys(sm_state + 0x7ff0);
1594 EDX = ldq_phys(sm_state + 0x7fe8);
1595 EBX = ldq_phys(sm_state + 0x7fe0);
1596 ESP = ldq_phys(sm_state + 0x7fd8);
1597 EBP = ldq_phys(sm_state + 0x7fd0);
1598 ESI = ldq_phys(sm_state + 0x7fc8);
1599 EDI = ldq_phys(sm_state + 0x7fc0);
1600 for(i = 8; i < 16; i++)
1601 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1602 env->eip = ldq_phys(sm_state + 0x7f78);
1603 load_eflags(ldl_phys(sm_state + 0x7f70),
1604 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1605 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1606 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1608 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1609 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1610 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1612 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1613 if (val & 0x20000) {
1614 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1616 #else
1617 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1618 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1619 load_eflags(ldl_phys(sm_state + 0x7ff4),
1620 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1621 env->eip = ldl_phys(sm_state + 0x7ff0);
1622 EDI = ldl_phys(sm_state + 0x7fec);
1623 ESI = ldl_phys(sm_state + 0x7fe8);
1624 EBP = ldl_phys(sm_state + 0x7fe4);
1625 ESP = ldl_phys(sm_state + 0x7fe0);
1626 EBX = ldl_phys(sm_state + 0x7fdc);
1627 EDX = ldl_phys(sm_state + 0x7fd8);
1628 ECX = ldl_phys(sm_state + 0x7fd4);
1629 EAX = ldl_phys(sm_state + 0x7fd0);
1630 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1631 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1633 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1634 env->tr.base = ldl_phys(sm_state + 0x7f64);
1635 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1636 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1638 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1639 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1640 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1641 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1643 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1644 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1646 env->idt.base = ldl_phys(sm_state + 0x7f58);
1647 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1649 for(i = 0; i < 6; i++) {
1650 if (i < 3)
1651 offset = 0x7f84 + i * 12;
1652 else
1653 offset = 0x7f2c + (i - 3) * 12;
1654 cpu_x86_load_seg_cache(env, i,
1655 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1656 ldl_phys(sm_state + offset + 8),
1657 ldl_phys(sm_state + offset + 4),
1658 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1660 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1662 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1663 if (val & 0x20000) {
1664 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1666 #endif
1667 CC_OP = CC_OP_EFLAGS;
1668 env->hflags &= ~HF_SMM_MASK;
1669 cpu_smm_update(env);
1671 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1672 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1675 #endif /* !CONFIG_USER_ONLY */
1678 /* division, flags are undefined */
1680 void helper_divb_AL(target_ulong t0)
1682 unsigned int num, den, q, r;
1684 num = (EAX & 0xffff);
1685 den = (t0 & 0xff);
1686 if (den == 0) {
1687 raise_exception(EXCP00_DIVZ);
1689 q = (num / den);
1690 if (q > 0xff)
1691 raise_exception(EXCP00_DIVZ);
1692 q &= 0xff;
1693 r = (num % den) & 0xff;
1694 EAX = (EAX & ~0xffff) | (r << 8) | q;
1697 void helper_idivb_AL(target_ulong t0)
1699 int num, den, q, r;
1701 num = (int16_t)EAX;
1702 den = (int8_t)t0;
1703 if (den == 0) {
1704 raise_exception(EXCP00_DIVZ);
1706 q = (num / den);
1707 if (q != (int8_t)q)
1708 raise_exception(EXCP00_DIVZ);
1709 q &= 0xff;
1710 r = (num % den) & 0xff;
1711 EAX = (EAX & ~0xffff) | (r << 8) | q;
1714 void helper_divw_AX(target_ulong t0)
1716 unsigned int num, den, q, r;
1718 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1719 den = (t0 & 0xffff);
1720 if (den == 0) {
1721 raise_exception(EXCP00_DIVZ);
1723 q = (num / den);
1724 if (q > 0xffff)
1725 raise_exception(EXCP00_DIVZ);
1726 q &= 0xffff;
1727 r = (num % den) & 0xffff;
1728 EAX = (EAX & ~0xffff) | q;
1729 EDX = (EDX & ~0xffff) | r;
1732 void helper_idivw_AX(target_ulong t0)
1734 int num, den, q, r;
1736 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1737 den = (int16_t)t0;
1738 if (den == 0) {
1739 raise_exception(EXCP00_DIVZ);
1741 q = (num / den);
1742 if (q != (int16_t)q)
1743 raise_exception(EXCP00_DIVZ);
1744 q &= 0xffff;
1745 r = (num % den) & 0xffff;
1746 EAX = (EAX & ~0xffff) | q;
1747 EDX = (EDX & ~0xffff) | r;
1750 void helper_divl_EAX(target_ulong t0)
1752 unsigned int den, r;
1753 uint64_t num, q;
1755 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1756 den = t0;
1757 if (den == 0) {
1758 raise_exception(EXCP00_DIVZ);
1760 q = (num / den);
1761 r = (num % den);
1762 if (q > 0xffffffff)
1763 raise_exception(EXCP00_DIVZ);
1764 EAX = (uint32_t)q;
1765 EDX = (uint32_t)r;
1768 void helper_idivl_EAX(target_ulong t0)
1770 int den, r;
1771 int64_t num, q;
1773 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1774 den = t0;
1775 if (den == 0) {
1776 raise_exception(EXCP00_DIVZ);
1778 q = (num / den);
1779 r = (num % den);
1780 if (q != (int32_t)q)
1781 raise_exception(EXCP00_DIVZ);
1782 EAX = (uint32_t)q;
1783 EDX = (uint32_t)r;
1786 /* bcd */
1788 /* XXX: exception */
1789 void helper_aam(int base)
1791 int al, ah;
1792 al = EAX & 0xff;
1793 ah = al / base;
1794 al = al % base;
1795 EAX = (EAX & ~0xffff) | al | (ah << 8);
1796 CC_DST = al;
1799 void helper_aad(int base)
1801 int al, ah;
1802 al = EAX & 0xff;
1803 ah = (EAX >> 8) & 0xff;
1804 al = ((ah * base) + al) & 0xff;
1805 EAX = (EAX & ~0xffff) | al;
1806 CC_DST = al;
1809 void helper_aaa(void)
1811 int icarry;
1812 int al, ah, af;
1813 int eflags;
1815 eflags = helper_cc_compute_all(CC_OP);
1816 af = eflags & CC_A;
1817 al = EAX & 0xff;
1818 ah = (EAX >> 8) & 0xff;
1820 icarry = (al > 0xf9);
1821 if (((al & 0x0f) > 9 ) || af) {
1822 al = (al + 6) & 0x0f;
1823 ah = (ah + 1 + icarry) & 0xff;
1824 eflags |= CC_C | CC_A;
1825 } else {
1826 eflags &= ~(CC_C | CC_A);
1827 al &= 0x0f;
1829 EAX = (EAX & ~0xffff) | al | (ah << 8);
1830 CC_SRC = eflags;
1833 void helper_aas(void)
1835 int icarry;
1836 int al, ah, af;
1837 int eflags;
1839 eflags = helper_cc_compute_all(CC_OP);
1840 af = eflags & CC_A;
1841 al = EAX & 0xff;
1842 ah = (EAX >> 8) & 0xff;
1844 icarry = (al < 6);
1845 if (((al & 0x0f) > 9 ) || af) {
1846 al = (al - 6) & 0x0f;
1847 ah = (ah - 1 - icarry) & 0xff;
1848 eflags |= CC_C | CC_A;
1849 } else {
1850 eflags &= ~(CC_C | CC_A);
1851 al &= 0x0f;
1853 EAX = (EAX & ~0xffff) | al | (ah << 8);
1854 CC_SRC = eflags;
1857 void helper_daa(void)
1859 int al, af, cf;
1860 int eflags;
1862 eflags = helper_cc_compute_all(CC_OP);
1863 cf = eflags & CC_C;
1864 af = eflags & CC_A;
1865 al = EAX & 0xff;
1867 eflags = 0;
1868 if (((al & 0x0f) > 9 ) || af) {
1869 al = (al + 6) & 0xff;
1870 eflags |= CC_A;
1872 if ((al > 0x9f) || cf) {
1873 al = (al + 0x60) & 0xff;
1874 eflags |= CC_C;
1876 EAX = (EAX & ~0xff) | al;
1877 /* well, speed is not an issue here, so we compute the flags by hand */
1878 eflags |= (al == 0) << 6; /* zf */
1879 eflags |= parity_table[al]; /* pf */
1880 eflags |= (al & 0x80); /* sf */
1881 CC_SRC = eflags;
1884 void helper_das(void)
1886 int al, al1, af, cf;
1887 int eflags;
1889 eflags = helper_cc_compute_all(CC_OP);
1890 cf = eflags & CC_C;
1891 af = eflags & CC_A;
1892 al = EAX & 0xff;
1894 eflags = 0;
1895 al1 = al;
1896 if (((al & 0x0f) > 9 ) || af) {
1897 eflags |= CC_A;
1898 if (al < 6 || cf)
1899 eflags |= CC_C;
1900 al = (al - 6) & 0xff;
1902 if ((al1 > 0x99) || cf) {
1903 al = (al - 0x60) & 0xff;
1904 eflags |= CC_C;
1906 EAX = (EAX & ~0xff) | al;
1907 /* well, speed is not an issue here, so we compute the flags by hand */
1908 eflags |= (al == 0) << 6; /* zf */
1909 eflags |= parity_table[al]; /* pf */
1910 eflags |= (al & 0x80); /* sf */
1911 CC_SRC = eflags;
1914 void helper_into(int next_eip_addend)
1916 int eflags;
1917 eflags = helper_cc_compute_all(CC_OP);
1918 if (eflags & CC_O) {
1919 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
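/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; if equal, store
   ECX:EBX and set ZF, otherwise load the old value into EDX:EAX and clear ZF */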
1923 void helper_cmpxchg8b(target_ulong a0)
1925 uint64_t d;
1926 int eflags;
1928 eflags = helper_cc_compute_all(CC_OP);
1929 d = ldq(a0);
1930 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1931 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1932 eflags |= CC_Z;
1933 } else {
1934 /* always do the store */
1935 stq(a0, d);
1936 EDX = (uint32_t)(d >> 32);
1937 EAX = (uint32_t)d;
1938 eflags &= ~CC_Z;
1940 CC_SRC = eflags;
1943 #ifdef TARGET_X86_64
1944 void helper_cmpxchg16b(target_ulong a0)
1946 uint64_t d0, d1;
1947 int eflags;
1949 if ((a0 & 0xf) != 0)
1950 raise_exception(EXCP0D_GPF);
1951 eflags = helper_cc_compute_all(CC_OP);
1952 d0 = ldq(a0);
1953 d1 = ldq(a0 + 8);
1954 if (d0 == EAX && d1 == EDX) {
1955 stq(a0, EBX);
1956 stq(a0 + 8, ECX);
1957 eflags |= CC_Z;
1958 } else {
1959 /* always do the store */
1960 stq(a0, d0);
1961 stq(a0 + 8, d1);
1962 EDX = d1;
1963 EAX = d0;
1964 eflags &= ~CC_Z;
1966 CC_SRC = eflags;
1968 #endif
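/* raise a #DB trap after a single-stepped instruction (sets DR6.BS in system emulation) */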
1970 void helper_single_step(void)
1972 #ifndef CONFIG_USER_ONLY
1973 check_hw_breakpoints(env, 1);
1974 env->dr[6] |= DR6_BS;
1975 #endif
1976 raise_exception(EXCP01_DB);
1979 void helper_cpuid(void)
1981 uint32_t eax, ebx, ecx, edx;
1983 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1985 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1986 EAX = eax;
1987 EBX = ebx;
1988 ECX = ecx;
1989 EDX = edx;
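/* ENTER with a non-zero nesting level: copy the enclosing frame pointers from
   the old frame and push the new frame pointer (t1) */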
1992 void helper_enter_level(int level, int data32, target_ulong t1)
1994 target_ulong ssp;
1995 uint32_t esp_mask, esp, ebp;
1997 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1998 ssp = env->segs[R_SS].base;
1999 ebp = EBP;
2000 esp = ESP;
2001 if (data32) {
2002 /* 32 bit */
2003 esp -= 4;
2004 while (--level) {
2005 esp -= 4;
2006 ebp -= 4;
2007 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2009 esp -= 4;
2010 stl(ssp + (esp & esp_mask), t1);
2011 } else {
2012 /* 16 bit */
2013 esp -= 2;
2014 while (--level) {
2015 esp -= 2;
2016 ebp -= 2;
2017 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2019 esp -= 2;
2020 stw(ssp + (esp & esp_mask), t1);
2024 #ifdef TARGET_X86_64
2025 void helper_enter64_level(int level, int data64, target_ulong t1)
2027 target_ulong esp, ebp;
2028 ebp = EBP;
2029 esp = ESP;
2031 if (data64) {
2032 /* 64 bit */
2033 esp -= 8;
2034 while (--level) {
2035 esp -= 8;
2036 ebp -= 8;
2037 stq(esp, ldq(ebp));
2039 esp -= 8;
2040 stq(esp, t1);
2041 } else {
2042 /* 16 bit */
2043 esp -= 2;
2044 while (--level) {
2045 esp -= 2;
2046 ebp -= 2;
2047 stw(esp, lduw(ebp));
2049 esp -= 2;
2050 stw(esp, t1);
2053 #endif
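/* LLDT: load the LDT register from an LDT descriptor in the GDT; a null
   selector leaves the LDT invalid */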
2055 void helper_lldt(int selector)
2057 SegmentCache *dt;
2058 uint32_t e1, e2;
2059 int index, entry_limit;
2060 target_ulong ptr;
2062 selector &= 0xffff;
2063 if ((selector & 0xfffc) == 0) {
2064 /* XXX: NULL selector case: invalid LDT */
2065 env->ldt.base = 0;
2066 env->ldt.limit = 0;
2067 } else {
2068 if (selector & 0x4)
2069 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2070 dt = &env->gdt;
2071 index = selector & ~7;
2072 #ifdef TARGET_X86_64
2073 if (env->hflags & HF_LMA_MASK)
2074 entry_limit = 15;
2075 else
2076 #endif
2077 entry_limit = 7;
2078 if ((index + entry_limit) > dt->limit)
2079 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2080 ptr = dt->base + index;
2081 e1 = ldl_kernel(ptr);
2082 e2 = ldl_kernel(ptr + 4);
2083 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2084 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2085 if (!(e2 & DESC_P_MASK))
2086 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2087 #ifdef TARGET_X86_64
2088 if (env->hflags & HF_LMA_MASK) {
2089 uint32_t e3;
2090 e3 = ldl_kernel(ptr + 8);
2091 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2092 env->ldt.base |= (target_ulong)e3 << 32;
2093 } else
2094 #endif
2096 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2099 env->ldt.selector = selector;
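/* LTR: load the task register from an available TSS descriptor in the GDT and
   mark that descriptor busy */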
2102 void helper_ltr(int selector)
2104 SegmentCache *dt;
2105 uint32_t e1, e2;
2106 int index, type, entry_limit;
2107 target_ulong ptr;
2109 selector &= 0xffff;
2110 if ((selector & 0xfffc) == 0) {
2111 /* NULL selector case: invalid TR */
2112 env->tr.base = 0;
2113 env->tr.limit = 0;
2114 env->tr.flags = 0;
2115 } else {
2116 if (selector & 0x4)
2117 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2118 dt = &env->gdt;
2119 index = selector & ~7;
2120 #ifdef TARGET_X86_64
2121 if (env->hflags & HF_LMA_MASK)
2122 entry_limit = 15;
2123 else
2124 #endif
2125 entry_limit = 7;
2126 if ((index + entry_limit) > dt->limit)
2127 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2128 ptr = dt->base + index;
2129 e1 = ldl_kernel(ptr);
2130 e2 = ldl_kernel(ptr + 4);
2131 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2132 if ((e2 & DESC_S_MASK) ||
2133 (type != 1 && type != 9))
2134 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2135 if (!(e2 & DESC_P_MASK))
2136 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2137 #ifdef TARGET_X86_64
2138 if (env->hflags & HF_LMA_MASK) {
2139 uint32_t e3, e4;
2140 e3 = ldl_kernel(ptr + 8);
2141 e4 = ldl_kernel(ptr + 12);
2142 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2143 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2144 load_seg_cache_raw_dt(&env->tr, e1, e2);
2145 env->tr.base |= (target_ulong)e3 << 32;
2146 } else
2147 #endif
2149 load_seg_cache_raw_dt(&env->tr, e1, e2);
2151 e2 |= DESC_TSS_BUSY_MASK;
2152 stl_kernel(ptr + 4, e2);
2154 env->tr.selector = selector;
2157 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2158 void helper_load_seg(int seg_reg, int selector)
2160 uint32_t e1, e2;
2161 int cpl, dpl, rpl;
2162 SegmentCache *dt;
2163 int index;
2164 target_ulong ptr;
2166 selector &= 0xffff;
2167 cpl = env->hflags & HF_CPL_MASK;
2168 if ((selector & 0xfffc) == 0) {
2169 /* null selector case */
2170 if (seg_reg == R_SS
2171 #ifdef TARGET_X86_64
2172 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2173 #endif
2175 raise_exception_err(EXCP0D_GPF, 0);
2176 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2177 } else {
2179 if (selector & 0x4)
2180 dt = &env->ldt;
2181 else
2182 dt = &env->gdt;
2183 index = selector & ~7;
2184 if ((index + 7) > dt->limit)
2185 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2186 ptr = dt->base + index;
2187 e1 = ldl_kernel(ptr);
2188 e2 = ldl_kernel(ptr + 4);
2190 if (!(e2 & DESC_S_MASK))
2191 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2192 rpl = selector & 3;
2193 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2194 if (seg_reg == R_SS) {
2195 /* must be writable segment */
2196 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2197 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2198 if (rpl != cpl || dpl != cpl)
2199 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2200 } else {
2201 /* must be readable segment */
2202 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2203 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2205 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2206 /* if not conforming code, test rights */
2207 if (dpl < cpl || dpl < rpl)
2208 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2212 if (!(e2 & DESC_P_MASK)) {
2213 if (seg_reg == R_SS)
2214 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2215 else
2216 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2219 /* set the access bit if not already set */
2220 if (!(e2 & DESC_A_MASK)) {
2221 e2 |= DESC_A_MASK;
2222 stl_kernel(ptr + 4, e2);
2225 cpu_x86_load_seg_cache(env, seg_reg, selector,
2226 get_seg_base(e1, e2),
2227 get_seg_limit(e1, e2),
2228 e2);
2229 #if 0
2230 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2231 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2232 #endif
2236 /* protected mode jump */
2237 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2238 int next_eip_addend)
2240 int gate_cs, type;
2241 uint32_t e1, e2, cpl, dpl, rpl, limit;
2242 target_ulong next_eip;
2244 if ((new_cs & 0xfffc) == 0)
2245 raise_exception_err(EXCP0D_GPF, 0);
2246 if (load_segment(&e1, &e2, new_cs) != 0)
2247 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2248 cpl = env->hflags & HF_CPL_MASK;
2249 if (e2 & DESC_S_MASK) {
2250 if (!(e2 & DESC_CS_MASK))
2251 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2252 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2253 if (e2 & DESC_C_MASK) {
2254 /* conforming code segment */
2255 if (dpl > cpl)
2256 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2257 } else {
2258 /* non conforming code segment */
2259 rpl = new_cs & 3;
2260 if (rpl > cpl)
2261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2262 if (dpl != cpl)
2263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2265 if (!(e2 & DESC_P_MASK))
2266 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2267 limit = get_seg_limit(e1, e2);
2268 if (new_eip > limit &&
2269 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2272 get_seg_base(e1, e2), limit, e2);
2273 EIP = new_eip;
2274 } else {
2275 /* jump to call or task gate */
2276 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2277 rpl = new_cs & 3;
2278 cpl = env->hflags & HF_CPL_MASK;
2279 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2280 switch(type) {
2281 case 1: /* 286 TSS */
2282 case 9: /* 386 TSS */
2283 case 5: /* task gate */
2284 if (dpl < cpl || dpl < rpl)
2285 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2286 next_eip = env->eip + next_eip_addend;
2287 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2288 CC_OP = CC_OP_EFLAGS;
2289 break;
2290 case 4: /* 286 call gate */
2291 case 12: /* 386 call gate */
2292 if ((dpl < cpl) || (dpl < rpl))
2293 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2294 if (!(e2 & DESC_P_MASK))
2295 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2296 gate_cs = e1 >> 16;
2297 new_eip = (e1 & 0xffff);
2298 if (type == 12)
2299 new_eip |= (e2 & 0xffff0000);
2300 if (load_segment(&e1, &e2, gate_cs) != 0)
2301 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2303 /* must be code segment */
2304 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2305 (DESC_S_MASK | DESC_CS_MASK)))
2306 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2307 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2308 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2309 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2310 if (!(e2 & DESC_P_MASK))
2311 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2312 limit = get_seg_limit(e1, e2);
2313 if (new_eip > limit)
2314 raise_exception_err(EXCP0D_GPF, 0);
2315 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2316 get_seg_base(e1, e2), limit, e2);
2317 EIP = new_eip;
2318 break;
2319 default:
2320 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2321 break;
2326 /* real mode call */
2327 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2328 int shift, int next_eip)
2330 int new_eip;
2331 uint32_t esp, esp_mask;
2332 target_ulong ssp;
2334 new_eip = new_eip1;
2335 esp = ESP;
2336 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2337 ssp = env->segs[R_SS].base;
2338 if (shift) {
2339 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2340 PUSHL(ssp, esp, esp_mask, next_eip);
2341 } else {
2342 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2343 PUSHW(ssp, esp, esp_mask, next_eip);
2346 SET_ESP(esp, esp_mask);
2347 env->eip = new_eip;
2348 env->segs[R_CS].selector = new_cs;
2349 env->segs[R_CS].base = (new_cs << 4);
2352 /* protected mode call */
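/* A direct far call just reloads CS; a call through a call gate to a more
   privileged code segment switches to the inner stack taken from the TSS and
   copies param_count parameters (16 or 32 bit each, depending on the gate
   type) to it */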
2353 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2354 int shift, int next_eip_addend)
2356 int new_stack, i;
2357 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2358 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2359 uint32_t val, limit, old_sp_mask;
2360 target_ulong ssp, old_ssp, next_eip;
2362 next_eip = env->eip + next_eip_addend;
2363 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2364 LOG_PCALL_STATE(env);
2365 if ((new_cs & 0xfffc) == 0)
2366 raise_exception_err(EXCP0D_GPF, 0);
2367 if (load_segment(&e1, &e2, new_cs) != 0)
2368 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2369 cpl = env->hflags & HF_CPL_MASK;
2370 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2371 if (e2 & DESC_S_MASK) {
2372 if (!(e2 & DESC_CS_MASK))
2373 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2374 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2375 if (e2 & DESC_C_MASK) {
2376 /* conforming code segment */
2377 if (dpl > cpl)
2378 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2379 } else {
2380 /* non conforming code segment */
2381 rpl = new_cs & 3;
2382 if (rpl > cpl)
2383 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2384 if (dpl != cpl)
2385 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387 if (!(e2 & DESC_P_MASK))
2388 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2390 #ifdef TARGET_X86_64
2391 /* XXX: check 16/32 bit cases in long mode */
2392 if (shift == 2) {
2393 target_ulong rsp;
2394 /* 64 bit case */
2395 rsp = ESP;
2396 PUSHQ(rsp, env->segs[R_CS].selector);
2397 PUSHQ(rsp, next_eip);
2398 /* from this point, not restartable */
2399 ESP = rsp;
2400 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2401 get_seg_base(e1, e2),
2402 get_seg_limit(e1, e2), e2);
2403 EIP = new_eip;
2404 } else
2405 #endif
2407 sp = ESP;
2408 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2409 ssp = env->segs[R_SS].base;
2410 if (shift) {
2411 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2412 PUSHL(ssp, sp, sp_mask, next_eip);
2413 } else {
2414 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2415 PUSHW(ssp, sp, sp_mask, next_eip);
2418 limit = get_seg_limit(e1, e2);
2419 if (new_eip > limit)
2420 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2421 /* from this point, not restartable */
2422 SET_ESP(sp, sp_mask);
2423 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2424 get_seg_base(e1, e2), limit, e2);
2425 EIP = new_eip;
2427 } else {
2428 /* check gate type */
2429 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2430 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2431 rpl = new_cs & 3;
2432 switch(type) {
2433 case 1: /* available 286 TSS */
2434 case 9: /* available 386 TSS */
2435 case 5: /* task gate */
2436 if (dpl < cpl || dpl < rpl)
2437 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2438 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2439 CC_OP = CC_OP_EFLAGS;
2440 return;
2441 case 4: /* 286 call gate */
2442 case 12: /* 386 call gate */
2443 break;
2444 default:
2445 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2446 break;
2448 shift = type >> 3;
2450 if (dpl < cpl || dpl < rpl)
2451 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2452 /* check valid bit */
2453 if (!(e2 & DESC_P_MASK))
2454 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2455 selector = e1 >> 16;
2456 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2457 param_count = e2 & 0x1f;
2458 if ((selector & 0xfffc) == 0)
2459 raise_exception_err(EXCP0D_GPF, 0);
2461 if (load_segment(&e1, &e2, selector) != 0)
2462 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2463 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2464 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2465 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2466 if (dpl > cpl)
2467 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2468 if (!(e2 & DESC_P_MASK))
2469 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2471 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2472 /* to inner privilege */
2473 get_ss_esp_from_tss(&ss, &sp, dpl);
2474 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2475 ss, sp, param_count, ESP);
2476 if ((ss & 0xfffc) == 0)
2477 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2478 if ((ss & 3) != dpl)
2479 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2480 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2481 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2482 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2483 if (ss_dpl != dpl)
2484 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2485 if (!(ss_e2 & DESC_S_MASK) ||
2486 (ss_e2 & DESC_CS_MASK) ||
2487 !(ss_e2 & DESC_W_MASK))
2488 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2489 if (!(ss_e2 & DESC_P_MASK))
2490 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492 // push_size = ((param_count * 2) + 8) << shift;
2494 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2495 old_ssp = env->segs[R_SS].base;
2497 sp_mask = get_sp_mask(ss_e2);
2498 ssp = get_seg_base(ss_e1, ss_e2);
2499 if (shift) {
2500 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2501 PUSHL(ssp, sp, sp_mask, ESP);
2502 for(i = param_count - 1; i >= 0; i--) {
2503 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2504 PUSHL(ssp, sp, sp_mask, val);
2506 } else {
2507 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2508 PUSHW(ssp, sp, sp_mask, ESP);
2509 for(i = param_count - 1; i >= 0; i--) {
2510 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2511 PUSHW(ssp, sp, sp_mask, val);
2514 new_stack = 1;
2515 } else {
2516 /* to same privilege */
2517 sp = ESP;
2518 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2519 ssp = env->segs[R_SS].base;
2520 // push_size = (4 << shift);
2521 new_stack = 0;
2524 if (shift) {
2525 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2526 PUSHL(ssp, sp, sp_mask, next_eip);
2527 } else {
2528 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2529 PUSHW(ssp, sp, sp_mask, next_eip);
2532 /* from this point, not restartable */
2534 if (new_stack) {
2535 ss = (ss & ~3) | dpl;
2536 cpu_x86_load_seg_cache(env, R_SS, ss,
2537 ssp,
2538 get_seg_limit(ss_e1, ss_e2),
2539 ss_e2);
2542 selector = (selector & ~3) | dpl;
2543 cpu_x86_load_seg_cache(env, R_CS, selector,
2544 get_seg_base(e1, e2),
2545 get_seg_limit(e1, e2),
2546 e2);
2547 cpu_x86_set_cpl(env, dpl);
2548 SET_ESP(sp, sp_mask);
2549 EIP = offset;
2553 /* real and vm86 mode iret */
2554 void helper_iret_real(int shift)
2556 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2557 target_ulong ssp;
2558 int eflags_mask;
2560 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2561 sp = ESP;
2562 ssp = env->segs[R_SS].base;
2563 if (shift == 1) {
2564 /* 32 bits */
2565 POPL(ssp, sp, sp_mask, new_eip);
2566 POPL(ssp, sp, sp_mask, new_cs);
2567 new_cs &= 0xffff;
2568 POPL(ssp, sp, sp_mask, new_eflags);
2569 } else {
2570 /* 16 bits */
2571 POPW(ssp, sp, sp_mask, new_eip);
2572 POPW(ssp, sp, sp_mask, new_cs);
2573 POPW(ssp, sp, sp_mask, new_eflags);
2575 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2576 env->segs[R_CS].selector = new_cs;
2577 env->segs[R_CS].base = (new_cs << 4);
2578 env->eip = new_eip;
2579 if (env->eflags & VM_MASK)
2580 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2581 else
2582 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2583 if (shift == 0)
2584 eflags_mask &= 0xffff;
2585 load_eflags(new_eflags, eflags_mask);
2586 env->hflags2 &= ~HF2_NMI_MASK;
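/* On a return to an outer privilege level, data (and non-conforming code)
   segment registers that are no longer valid at the new CPL are loaded with a
   null value */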
2589 static inline void validate_seg(int seg_reg, int cpl)
2591 int dpl;
2592 uint32_t e2;
2594 /* XXX: on x86_64, we do not want to nullify FS and GS because
2595 they may still contain a valid base. I would be interested to
2596 know how a real x86_64 CPU behaves */
2597 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2598 (env->segs[seg_reg].selector & 0xfffc) == 0)
2599 return;
2601 e2 = env->segs[seg_reg].flags;
2602 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2603 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2604 /* data or non conforming code segment */
2605 if (dpl < cpl) {
2606 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2611 /* protected mode iret */
2612 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2614 uint32_t new_cs, new_eflags, new_ss;
2615 uint32_t new_es, new_ds, new_fs, new_gs;
2616 uint32_t e1, e2, ss_e1, ss_e2;
2617 int cpl, dpl, rpl, eflags_mask, iopl;
2618 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2620 #ifdef TARGET_X86_64
2621 if (shift == 2)
2622 sp_mask = -1;
2623 else
2624 #endif
2625 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2626 sp = ESP;
2627 ssp = env->segs[R_SS].base;
2628 new_eflags = 0; /* avoid warning */
2629 #ifdef TARGET_X86_64
2630 if (shift == 2) {
2631 POPQ(sp, new_eip);
2632 POPQ(sp, new_cs);
2633 new_cs &= 0xffff;
2634 if (is_iret) {
2635 POPQ(sp, new_eflags);
2637 } else
2638 #endif
2639 if (shift == 1) {
2640 /* 32 bits */
2641 POPL(ssp, sp, sp_mask, new_eip);
2642 POPL(ssp, sp, sp_mask, new_cs);
2643 new_cs &= 0xffff;
2644 if (is_iret) {
2645 POPL(ssp, sp, sp_mask, new_eflags);
2646 if (new_eflags & VM_MASK)
2647 goto return_to_vm86;
2649 } else {
2650 /* 16 bits */
2651 POPW(ssp, sp, sp_mask, new_eip);
2652 POPW(ssp, sp, sp_mask, new_cs);
2653 if (is_iret)
2654 POPW(ssp, sp, sp_mask, new_eflags);
2656 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2657 new_cs, new_eip, shift, addend);
2658 LOG_PCALL_STATE(env);
2659 if ((new_cs & 0xfffc) == 0)
2660 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2661 if (load_segment(&e1, &e2, new_cs) != 0)
2662 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2663 if (!(e2 & DESC_S_MASK) ||
2664 !(e2 & DESC_CS_MASK))
2665 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2666 cpl = env->hflags & HF_CPL_MASK;
2667 rpl = new_cs & 3;
2668 if (rpl < cpl)
2669 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2670 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2671 if (e2 & DESC_C_MASK) {
2672 if (dpl > rpl)
2673 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2674 } else {
2675 if (dpl != rpl)
2676 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2678 if (!(e2 & DESC_P_MASK))
2679 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2681 sp += addend;
2682 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2683 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2684 /* return to same privilege level */
2685 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2686 get_seg_base(e1, e2),
2687 get_seg_limit(e1, e2),
2688 e2);
2689 } else {
2690 /* return to different privilege level */
2691 #ifdef TARGET_X86_64
2692 if (shift == 2) {
2693 POPQ(sp, new_esp);
2694 POPQ(sp, new_ss);
2695 new_ss &= 0xffff;
2696 } else
2697 #endif
2698 if (shift == 1) {
2699 /* 32 bits */
2700 POPL(ssp, sp, sp_mask, new_esp);
2701 POPL(ssp, sp, sp_mask, new_ss);
2702 new_ss &= 0xffff;
2703 } else {
2704 /* 16 bits */
2705 POPW(ssp, sp, sp_mask, new_esp);
2706 POPW(ssp, sp, sp_mask, new_ss);
2708 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2709 new_ss, new_esp);
2710 if ((new_ss & 0xfffc) == 0) {
2711 #ifdef TARGET_X86_64
2712 /* NULL ss is allowed in long mode if cpl != 3*/
2713 /* XXX: test CS64 ? */
2714 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2715 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2716 0, 0xffffffff,
2717 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2718 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2719 DESC_W_MASK | DESC_A_MASK);
2720 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2721 } else
2722 #endif
2724 raise_exception_err(EXCP0D_GPF, 0);
2726 } else {
2727 if ((new_ss & 3) != rpl)
2728 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2729 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2730 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2731 if (!(ss_e2 & DESC_S_MASK) ||
2732 (ss_e2 & DESC_CS_MASK) ||
2733 !(ss_e2 & DESC_W_MASK))
2734 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2735 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2736 if (dpl != rpl)
2737 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2738 if (!(ss_e2 & DESC_P_MASK))
2739 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2740 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2741 get_seg_base(ss_e1, ss_e2),
2742 get_seg_limit(ss_e1, ss_e2),
2743 ss_e2);
2746 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2747 get_seg_base(e1, e2),
2748 get_seg_limit(e1, e2),
2749 e2);
2750 cpu_x86_set_cpl(env, rpl);
2751 sp = new_esp;
2752 #ifdef TARGET_X86_64
2753 if (env->hflags & HF_CS64_MASK)
2754 sp_mask = -1;
2755 else
2756 #endif
2757 sp_mask = get_sp_mask(ss_e2);
2759 /* validate data segments */
2760 validate_seg(R_ES, rpl);
2761 validate_seg(R_DS, rpl);
2762 validate_seg(R_FS, rpl);
2763 validate_seg(R_GS, rpl);
2765 sp += addend;
2767 SET_ESP(sp, sp_mask);
2768 env->eip = new_eip;
2769 if (is_iret) {
2770 /* NOTE: 'cpl' is the _old_ CPL */
2771 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2772 if (cpl == 0)
2773 eflags_mask |= IOPL_MASK;
2774 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2775 if (cpl <= iopl)
2776 eflags_mask |= IF_MASK;
2777 if (shift == 0)
2778 eflags_mask &= 0xffff;
2779 load_eflags(new_eflags, eflags_mask);
2781 return;
2783 return_to_vm86:
2784 POPL(ssp, sp, sp_mask, new_esp);
2785 POPL(ssp, sp, sp_mask, new_ss);
2786 POPL(ssp, sp, sp_mask, new_es);
2787 POPL(ssp, sp, sp_mask, new_ds);
2788 POPL(ssp, sp, sp_mask, new_fs);
2789 POPL(ssp, sp, sp_mask, new_gs);
2791 /* modify processor state */
2792 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2793 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2794 load_seg_vm(R_CS, new_cs & 0xffff);
2795 cpu_x86_set_cpl(env, 3);
2796 load_seg_vm(R_SS, new_ss & 0xffff);
2797 load_seg_vm(R_ES, new_es & 0xffff);
2798 load_seg_vm(R_DS, new_ds & 0xffff);
2799 load_seg_vm(R_FS, new_fs & 0xffff);
2800 load_seg_vm(R_GS, new_gs & 0xffff);
2802 env->eip = new_eip & 0xffff;
2803 ESP = new_esp;
2806 void helper_iret_protected(int shift, int next_eip)
2808 int tss_selector, type;
2809 uint32_t e1, e2;
2811 /* specific case for TSS */
2812 if (env->eflags & NT_MASK) {
2813 #ifdef TARGET_X86_64
2814 if (env->hflags & HF_LMA_MASK)
2815 raise_exception_err(EXCP0D_GPF, 0);
2816 #endif
2817 tss_selector = lduw_kernel(env->tr.base + 0);
2818 if (tss_selector & 4)
2819 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2820 if (load_segment(&e1, &e2, tss_selector) != 0)
2821 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2822 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2823 /* NOTE: we check both segment and busy TSS */
2824 if (type != 3)
2825 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2826 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2827 } else {
2828 helper_ret_protected(shift, 1, 0);
2830 env->hflags2 &= ~HF2_NMI_MASK;
2833 void helper_lret_protected(int shift, int addend)
2835 helper_ret_protected(shift, 0, addend);
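/* SYSENTER: fast system call entry; CS/SS are derived from the SYSENTER_CS
   MSR, ESP/EIP come from SYSENTER_ESP/EIP, and the CPL is forced to 0 */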
2838 void helper_sysenter(void)
2840 if (env->sysenter_cs == 0) {
2841 raise_exception_err(EXCP0D_GPF, 0);
2843 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2844 cpu_x86_set_cpl(env, 0);
2846 #ifdef TARGET_X86_64
2847 if (env->hflags & HF_LMA_MASK) {
2848 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2849 0, 0xffffffff,
2850 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2851 DESC_S_MASK |
2852 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2853 } else
2854 #endif
2856 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2857 0, 0xffffffff,
2858 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2859 DESC_S_MASK |
2860 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2862 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2863 0, 0xffffffff,
2864 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2865 DESC_S_MASK |
2866 DESC_W_MASK | DESC_A_MASK);
2867 ESP = env->sysenter_esp;
2868 EIP = env->sysenter_eip;
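/* SYSEXIT: return to CPL 3; the target CS/SS selectors are derived from
   SYSENTER_CS, ESP is taken from ECX and EIP from EDX */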
2871 void helper_sysexit(int dflag)
2873 int cpl;
2875 cpl = env->hflags & HF_CPL_MASK;
2876 if (env->sysenter_cs == 0 || cpl != 0) {
2877 raise_exception_err(EXCP0D_GPF, 0);
2879 cpu_x86_set_cpl(env, 3);
2880 #ifdef TARGET_X86_64
2881 if (dflag == 2) {
2882 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2883 0, 0xffffffff,
2884 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2885 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2886 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2887 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2888 0, 0xffffffff,
2889 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2890 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2891 DESC_W_MASK | DESC_A_MASK);
2892 } else
2893 #endif
2895 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2896 0, 0xffffffff,
2897 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2898 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2899 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2900 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2901 0, 0xffffffff,
2902 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2903 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2904 DESC_W_MASK | DESC_A_MASK);
2906 ESP = ECX;
2907 EIP = EDX;
2910 #if defined(CONFIG_USER_ONLY)
2911 target_ulong helper_read_crN(int reg)
2913 return 0;
2916 void helper_write_crN(int reg, target_ulong t0)
2920 void helper_movl_drN_T0(int reg, target_ulong t0)
2923 #else
2924 target_ulong helper_read_crN(int reg)
2926 target_ulong val;
2928 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2929 switch(reg) {
2930 default:
2931 val = env->cr[reg];
2932 break;
2933 case 8:
2934 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2935 val = cpu_get_apic_tpr(env->apic_state);
2936 } else {
2937 val = env->v_tpr;
2939 break;
2941 return val;
2944 void helper_write_crN(int reg, target_ulong t0)
2946 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2947 switch(reg) {
2948 case 0:
2949 cpu_x86_update_cr0(env, t0);
2950 break;
2951 case 3:
2952 cpu_x86_update_cr3(env, t0);
2953 break;
2954 case 4:
2955 cpu_x86_update_cr4(env, t0);
2956 break;
2957 case 8:
2958 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2959 cpu_set_apic_tpr(env->apic_state, t0);
2960 }
2961 env->v_tpr = t0 & 0x0f;
2962 break;
2963 default:
2964 env->cr[reg] = t0;
2965 break;
2969 void helper_movl_drN_T0(int reg, target_ulong t0)
2971 int i;
2973 if (reg < 4) {
2974 hw_breakpoint_remove(env, reg);
2975 env->dr[reg] = t0;
2976 hw_breakpoint_insert(env, reg);
2977 } else if (reg == 7) {
2978 for (i = 0; i < 4; i++)
2979 hw_breakpoint_remove(env, i);
2980 env->dr[7] = t0;
2981 for (i = 0; i < 4; i++)
2982 hw_breakpoint_insert(env, i);
2983 } else
2984 env->dr[reg] = t0;
2986 #endif
2988 void helper_lmsw(target_ulong t0)
2990 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2991 if already set to one. */
2992 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2993 helper_write_crN(0, t0);
2996 void helper_clts(void)
2998 env->cr[0] &= ~CR0_TS_MASK;
2999 env->hflags &= ~HF_TS_MASK;
3002 void helper_invlpg(target_ulong addr)
3004 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3005 tlb_flush_page(env, addr);
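/* RDTSC: load the time stamp counter into EDX:EAX; raises #GP if CR4.TSD is
   set and CPL != 0 */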
3008 void helper_rdtsc(void)
3010 uint64_t val;
3012 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3013 raise_exception(EXCP0D_GPF);
3015 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3017 val = cpu_get_tsc(env) + env->tsc_offset;
3018 EAX = (uint32_t)(val);
3019 EDX = (uint32_t)(val >> 32);
3022 void helper_rdtscp(void)
3024 helper_rdtsc();
3025 ECX = (uint32_t)(env->tsc_aux);
3028 void helper_rdpmc(void)
3030 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3031 raise_exception(EXCP0D_GPF);
3033 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3035 /* currently unimplemented */
3036 raise_exception_err(EXCP06_ILLOP, 0);
3039 #if defined(CONFIG_USER_ONLY)
3040 void helper_wrmsr(void)
3044 void helper_rdmsr(void)
3047 #else
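/* WRMSR: write the 64-bit value in EDX:EAX to the MSR selected by ECX */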
3048 void helper_wrmsr(void)
3050 uint64_t val;
3052 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3054 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3056 switch((uint32_t)ECX) {
3057 case MSR_IA32_SYSENTER_CS:
3058 env->sysenter_cs = val & 0xffff;
3059 break;
3060 case MSR_IA32_SYSENTER_ESP:
3061 env->sysenter_esp = val;
3062 break;
3063 case MSR_IA32_SYSENTER_EIP:
3064 env->sysenter_eip = val;
3065 break;
3066 case MSR_IA32_APICBASE:
3067 cpu_set_apic_base(env->apic_state, val);
3068 break;
3069 case MSR_EFER:
3071 uint64_t update_mask;
3072 update_mask = 0;
3073 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3074 update_mask |= MSR_EFER_SCE;
3075 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3076 update_mask |= MSR_EFER_LME;
3077 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3078 update_mask |= MSR_EFER_FFXSR;
3079 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3080 update_mask |= MSR_EFER_NXE;
3081 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3082 update_mask |= MSR_EFER_SVME;
3083 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3084 update_mask |= MSR_EFER_FFXSR;
3085 cpu_load_efer(env, (env->efer & ~update_mask) |
3086 (val & update_mask));
3088 break;
3089 case MSR_STAR:
3090 env->star = val;
3091 break;
3092 case MSR_PAT:
3093 env->pat = val;
3094 break;
3095 case MSR_VM_HSAVE_PA:
3096 env->vm_hsave = val;
3097 break;
3098 #ifdef TARGET_X86_64
3099 case MSR_LSTAR:
3100 env->lstar = val;
3101 break;
3102 case MSR_CSTAR:
3103 env->cstar = val;
3104 break;
3105 case MSR_FMASK:
3106 env->fmask = val;
3107 break;
3108 case MSR_FSBASE:
3109 env->segs[R_FS].base = val;
3110 break;
3111 case MSR_GSBASE:
3112 env->segs[R_GS].base = val;
3113 break;
3114 case MSR_KERNELGSBASE:
3115 env->kernelgsbase = val;
3116 break;
3117 #endif
3118 case MSR_MTRRphysBase(0):
3119 case MSR_MTRRphysBase(1):
3120 case MSR_MTRRphysBase(2):
3121 case MSR_MTRRphysBase(3):
3122 case MSR_MTRRphysBase(4):
3123 case MSR_MTRRphysBase(5):
3124 case MSR_MTRRphysBase(6):
3125 case MSR_MTRRphysBase(7):
3126 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3127 break;
3128 case MSR_MTRRphysMask(0):
3129 case MSR_MTRRphysMask(1):
3130 case MSR_MTRRphysMask(2):
3131 case MSR_MTRRphysMask(3):
3132 case MSR_MTRRphysMask(4):
3133 case MSR_MTRRphysMask(5):
3134 case MSR_MTRRphysMask(6):
3135 case MSR_MTRRphysMask(7):
3136 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3137 break;
3138 case MSR_MTRRfix64K_00000:
3139 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3140 break;
3141 case MSR_MTRRfix16K_80000:
3142 case MSR_MTRRfix16K_A0000:
3143 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3144 break;
3145 case MSR_MTRRfix4K_C0000:
3146 case MSR_MTRRfix4K_C8000:
3147 case MSR_MTRRfix4K_D0000:
3148 case MSR_MTRRfix4K_D8000:
3149 case MSR_MTRRfix4K_E0000:
3150 case MSR_MTRRfix4K_E8000:
3151 case MSR_MTRRfix4K_F0000:
3152 case MSR_MTRRfix4K_F8000:
3153 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3154 break;
3155 case MSR_MTRRdefType:
3156 env->mtrr_deftype = val;
3157 break;
3158 case MSR_MCG_STATUS:
3159 env->mcg_status = val;
3160 break;
3161 case MSR_MCG_CTL:
3162 if ((env->mcg_cap & MCG_CTL_P)
3163 && (val == 0 || val == ~(uint64_t)0))
3164 env->mcg_ctl = val;
3165 break;
3166 case MSR_TSC_AUX:
3167 env->tsc_aux = val;
3168 break;
3169 default:
3170 if ((uint32_t)ECX >= MSR_MC0_CTL
3171 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3172 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3173 if ((offset & 0x3) != 0
3174 || (val == 0 || val == ~(uint64_t)0))
3175 env->mce_banks[offset] = val;
3176 break;
3178 /* XXX: exception ? */
3179 break;
3183 void helper_rdmsr(void)
3185 uint64_t val;
3187 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3189 switch((uint32_t)ECX) {
3190 case MSR_IA32_SYSENTER_CS:
3191 val = env->sysenter_cs;
3192 break;
3193 case MSR_IA32_SYSENTER_ESP:
3194 val = env->sysenter_esp;
3195 break;
3196 case MSR_IA32_SYSENTER_EIP:
3197 val = env->sysenter_eip;
3198 break;
3199 case MSR_IA32_APICBASE:
3200 val = cpu_get_apic_base(env->apic_state);
3201 break;
3202 case MSR_EFER:
3203 val = env->efer;
3204 break;
3205 case MSR_STAR:
3206 val = env->star;
3207 break;
3208 case MSR_PAT:
3209 val = env->pat;
3210 break;
3211 case MSR_VM_HSAVE_PA:
3212 val = env->vm_hsave;
3213 break;
3214 case MSR_IA32_PERF_STATUS:
3215 /* tsc_increment_by_tick */
3216 val = 1000ULL;
3217 /* CPU multiplier */
3218 val |= (((uint64_t)4ULL) << 40);
3219 break;
3220 #ifdef TARGET_X86_64
3221 case MSR_LSTAR:
3222 val = env->lstar;
3223 break;
3224 case MSR_CSTAR:
3225 val = env->cstar;
3226 break;
3227 case MSR_FMASK:
3228 val = env->fmask;
3229 break;
3230 case MSR_FSBASE:
3231 val = env->segs[R_FS].base;
3232 break;
3233 case MSR_GSBASE:
3234 val = env->segs[R_GS].base;
3235 break;
3236 case MSR_KERNELGSBASE:
3237 val = env->kernelgsbase;
3238 break;
3239 case MSR_TSC_AUX:
3240 val = env->tsc_aux;
3241 break;
3242 #endif
3243 case MSR_MTRRphysBase(0):
3244 case MSR_MTRRphysBase(1):
3245 case MSR_MTRRphysBase(2):
3246 case MSR_MTRRphysBase(3):
3247 case MSR_MTRRphysBase(4):
3248 case MSR_MTRRphysBase(5):
3249 case MSR_MTRRphysBase(6):
3250 case MSR_MTRRphysBase(7):
3251 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3252 break;
3253 case MSR_MTRRphysMask(0):
3254 case MSR_MTRRphysMask(1):
3255 case MSR_MTRRphysMask(2):
3256 case MSR_MTRRphysMask(3):
3257 case MSR_MTRRphysMask(4):
3258 case MSR_MTRRphysMask(5):
3259 case MSR_MTRRphysMask(6):
3260 case MSR_MTRRphysMask(7):
3261 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3262 break;
3263 case MSR_MTRRfix64K_00000:
3264 val = env->mtrr_fixed[0];
3265 break;
3266 case MSR_MTRRfix16K_80000:
3267 case MSR_MTRRfix16K_A0000:
3268 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3269 break;
3270 case MSR_MTRRfix4K_C0000:
3271 case MSR_MTRRfix4K_C8000:
3272 case MSR_MTRRfix4K_D0000:
3273 case MSR_MTRRfix4K_D8000:
3274 case MSR_MTRRfix4K_E0000:
3275 case MSR_MTRRfix4K_E8000:
3276 case MSR_MTRRfix4K_F0000:
3277 case MSR_MTRRfix4K_F8000:
3278 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3279 break;
3280 case MSR_MTRRdefType:
3281 val = env->mtrr_deftype;
3282 break;
3283 case MSR_MTRRcap:
3284 if (env->cpuid_features & CPUID_MTRR)
3285 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3286 else
3287 /* XXX: exception ? */
3288 val = 0;
3289 break;
3290 case MSR_MCG_CAP:
3291 val = env->mcg_cap;
3292 break;
3293 case MSR_MCG_CTL:
3294 if (env->mcg_cap & MCG_CTL_P)
3295 val = env->mcg_ctl;
3296 else
3297 val = 0;
3298 break;
3299 case MSR_MCG_STATUS:
3300 val = env->mcg_status;
3301 break;
3302 default:
3303 if ((uint32_t)ECX >= MSR_MC0_CTL
3304 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3305 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3306 val = env->mce_banks[offset];
3307 break;
3309 /* XXX: exception ? */
3310 val = 0;
3311 break;
3313 EAX = (uint32_t)(val);
3314 EDX = (uint32_t)(val >> 32);
3316 #endif
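/* LSL/LAR/VERR/VERW perform the descriptor checks without faulting: any
   failure clears ZF, success sets ZF (LSL additionally returns the limit,
   LAR the access rights) */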
3318 target_ulong helper_lsl(target_ulong selector1)
3320 unsigned int limit;
3321 uint32_t e1, e2, eflags, selector;
3322 int rpl, dpl, cpl, type;
3324 selector = selector1 & 0xffff;
3325 eflags = helper_cc_compute_all(CC_OP);
3326 if ((selector & 0xfffc) == 0)
3327 goto fail;
3328 if (load_segment(&e1, &e2, selector) != 0)
3329 goto fail;
3330 rpl = selector & 3;
3331 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3332 cpl = env->hflags & HF_CPL_MASK;
3333 if (e2 & DESC_S_MASK) {
3334 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3335 /* conforming */
3336 } else {
3337 if (dpl < cpl || dpl < rpl)
3338 goto fail;
3340 } else {
3341 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3342 switch(type) {
3343 case 1:
3344 case 2:
3345 case 3:
3346 case 9:
3347 case 11:
3348 break;
3349 default:
3350 goto fail;
3352 if (dpl < cpl || dpl < rpl) {
3353 fail:
3354 CC_SRC = eflags & ~CC_Z;
3355 return 0;
3358 limit = get_seg_limit(e1, e2);
3359 CC_SRC = eflags | CC_Z;
3360 return limit;
3363 target_ulong helper_lar(target_ulong selector1)
3365 uint32_t e1, e2, eflags, selector;
3366 int rpl, dpl, cpl, type;
3368 selector = selector1 & 0xffff;
3369 eflags = helper_cc_compute_all(CC_OP);
3370 if ((selector & 0xfffc) == 0)
3371 goto fail;
3372 if (load_segment(&e1, &e2, selector) != 0)
3373 goto fail;
3374 rpl = selector & 3;
3375 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3376 cpl = env->hflags & HF_CPL_MASK;
3377 if (e2 & DESC_S_MASK) {
3378 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3379 /* conforming */
3380 } else {
3381 if (dpl < cpl || dpl < rpl)
3382 goto fail;
3384 } else {
3385 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3386 switch(type) {
3387 case 1:
3388 case 2:
3389 case 3:
3390 case 4:
3391 case 5:
3392 case 9:
3393 case 11:
3394 case 12:
3395 break;
3396 default:
3397 goto fail;
3399 if (dpl < cpl || dpl < rpl) {
3400 fail:
3401 CC_SRC = eflags & ~CC_Z;
3402 return 0;
3405 CC_SRC = eflags | CC_Z;
3406 return e2 & 0x00f0ff00;
3409 void helper_verr(target_ulong selector1)
3411 uint32_t e1, e2, eflags, selector;
3412 int rpl, dpl, cpl;
3414 selector = selector1 & 0xffff;
3415 eflags = helper_cc_compute_all(CC_OP);
3416 if ((selector & 0xfffc) == 0)
3417 goto fail;
3418 if (load_segment(&e1, &e2, selector) != 0)
3419 goto fail;
3420 if (!(e2 & DESC_S_MASK))
3421 goto fail;
3422 rpl = selector & 3;
3423 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3424 cpl = env->hflags & HF_CPL_MASK;
3425 if (e2 & DESC_CS_MASK) {
3426 if (!(e2 & DESC_R_MASK))
3427 goto fail;
3428 if (!(e2 & DESC_C_MASK)) {
3429 if (dpl < cpl || dpl < rpl)
3430 goto fail;
3432 } else {
3433 if (dpl < cpl || dpl < rpl) {
3434 fail:
3435 CC_SRC = eflags & ~CC_Z;
3436 return;
3439 CC_SRC = eflags | CC_Z;
3442 void helper_verw(target_ulong selector1)
3444 uint32_t e1, e2, eflags, selector;
3445 int rpl, dpl, cpl;
3447 selector = selector1 & 0xffff;
3448 eflags = helper_cc_compute_all(CC_OP);
3449 if ((selector & 0xfffc) == 0)
3450 goto fail;
3451 if (load_segment(&e1, &e2, selector) != 0)
3452 goto fail;
3453 if (!(e2 & DESC_S_MASK))
3454 goto fail;
3455 rpl = selector & 3;
3456 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3457 cpl = env->hflags & HF_CPL_MASK;
3458 if (e2 & DESC_CS_MASK) {
3459 goto fail;
3460 } else {
3461 if (dpl < cpl || dpl < rpl)
3462 goto fail;
3463 if (!(e2 & DESC_W_MASK)) {
3464 fail:
3465 CC_SRC = eflags & ~CC_Z;
3466 return;
3469 CC_SRC = eflags | CC_Z;
3472 /* x87 FPU helpers */
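/* Note: the transcendental helpers below convert through the host double
   type, so they only provide double precision, not the full 64-bit
   significand of floatx80 */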
3474 static inline double floatx80_to_double(floatx80 a)
3476 union {
3477 float64 f64;
3478 double d;
3479 } u;
3481 u.f64 = floatx80_to_float64(a, &env->fp_status);
3482 return u.d;
3485 static inline floatx80 double_to_floatx80(double a)
3487 union {
3488 float64 f64;
3489 double d;
3490 } u;
3492 u.d = a;
3493 return float64_to_floatx80(u.f64, &env->fp_status);
3496 static void fpu_set_exception(int mask)
3498 env->fpus |= mask;
3499 if (env->fpus & (~env->fpuc & FPUC_EM))
3500 env->fpus |= FPUS_SE | FPUS_B;
3503 static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3505 if (floatx80_is_zero(b)) {
3506 fpu_set_exception(FPUS_ZE);
3508 return floatx80_div(a, b, &env->fp_status);
3511 static void fpu_raise_exception(void)
3513 if (env->cr[0] & CR0_NE_MASK) {
3514 raise_exception(EXCP10_COPR);
3516 #if !defined(CONFIG_USER_ONLY)
3517 else {
3518 cpu_set_ferr(env);
3520 #endif
3523 void helper_flds_FT0(uint32_t val)
3525 union {
3526 float32 f;
3527 uint32_t i;
3528 } u;
3529 u.i = val;
3530 FT0 = float32_to_floatx80(u.f, &env->fp_status);
3533 void helper_fldl_FT0(uint64_t val)
3535 union {
3536 float64 f;
3537 uint64_t i;
3538 } u;
3539 u.i = val;
3540 FT0 = float64_to_floatx80(u.f, &env->fp_status);
3543 void helper_fildl_FT0(int32_t val)
3545 FT0 = int32_to_floatx80(val, &env->fp_status);
3548 void helper_flds_ST0(uint32_t val)
3550 int new_fpstt;
3551 union {
3552 float32 f;
3553 uint32_t i;
3554 } u;
3555 new_fpstt = (env->fpstt - 1) & 7;
3556 u.i = val;
3557 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3558 env->fpstt = new_fpstt;
3559 env->fptags[new_fpstt] = 0; /* validate stack entry */
3562 void helper_fldl_ST0(uint64_t val)
3564 int new_fpstt;
3565 union {
3566 float64 f;
3567 uint64_t i;
3568 } u;
3569 new_fpstt = (env->fpstt - 1) & 7;
3570 u.i = val;
3571 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3572 env->fpstt = new_fpstt;
3573 env->fptags[new_fpstt] = 0; /* validate stack entry */
3576 void helper_fildl_ST0(int32_t val)
3578 int new_fpstt;
3579 new_fpstt = (env->fpstt - 1) & 7;
3580 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3581 env->fpstt = new_fpstt;
3582 env->fptags[new_fpstt] = 0; /* validate stack entry */
3585 void helper_fildll_ST0(int64_t val)
3587 int new_fpstt;
3588 new_fpstt = (env->fpstt - 1) & 7;
3589 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3590 env->fpstt = new_fpstt;
3591 env->fptags[new_fpstt] = 0; /* validate stack entry */
3594 uint32_t helper_fsts_ST0(void)
3596 union {
3597 float32 f;
3598 uint32_t i;
3599 } u;
3600 u.f = floatx80_to_float32(ST0, &env->fp_status);
3601 return u.i;
3604 uint64_t helper_fstl_ST0(void)
3606 union {
3607 float64 f;
3608 uint64_t i;
3609 } u;
3610 u.f = floatx80_to_float64(ST0, &env->fp_status);
3611 return u.i;
3614 int32_t helper_fist_ST0(void)
3616 int32_t val;
3617 val = floatx80_to_int32(ST0, &env->fp_status);
3618 if (val != (int16_t)val)
3619 val = -32768;
3620 return val;
3623 int32_t helper_fistl_ST0(void)
3625 int32_t val;
3626 val = floatx80_to_int32(ST0, &env->fp_status);
3627 return val;
3630 int64_t helper_fistll_ST0(void)
3632 int64_t val;
3633 val = floatx80_to_int64(ST0, &env->fp_status);
3634 return val;
3637 int32_t helper_fistt_ST0(void)
3639 int32_t val;
3640 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3641 if (val != (int16_t)val)
3642 val = -32768;
3643 return val;
3646 int32_t helper_fisttl_ST0(void)
3648 int32_t val;
3649 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3650 return val;
3653 int64_t helper_fisttll_ST0(void)
3655 int64_t val;
3656 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3657 return val;
3660 void helper_fldt_ST0(target_ulong ptr)
3662 int new_fpstt;
3663 new_fpstt = (env->fpstt - 1) & 7;
3664 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3665 env->fpstt = new_fpstt;
3666 env->fptags[new_fpstt] = 0; /* validate stack entry */
3669 void helper_fstt_ST0(target_ulong ptr)
3671 helper_fstt(ST0, ptr);
3674 void helper_fpush(void)
3676 fpush();
3679 void helper_fpop(void)
3681 fpop();
3684 void helper_fdecstp(void)
3686 env->fpstt = (env->fpstt - 1) & 7;
3687 env->fpus &= (~0x4700);
3690 void helper_fincstp(void)
3692 env->fpstt = (env->fpstt + 1) & 7;
3693 env->fpus &= (~0x4700);
3696 /* FPU move */
3698 void helper_ffree_STN(int st_index)
3700 env->fptags[(env->fpstt + st_index) & 7] = 1;
3703 void helper_fmov_ST0_FT0(void)
3705 ST0 = FT0;
3708 void helper_fmov_FT0_STN(int st_index)
3710 FT0 = ST(st_index);
3713 void helper_fmov_ST0_STN(int st_index)
3715 ST0 = ST(st_index);
3718 void helper_fmov_STN_ST0(int st_index)
3720 ST(st_index) = ST0;
3723 void helper_fxchg_ST0_STN(int st_index)
3725 floatx80 tmp;
3726 tmp = ST(st_index);
3727 ST(st_index) = ST0;
3728 ST0 = tmp;
3731 /* FPU operations */
3733 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3735 void helper_fcom_ST0_FT0(void)
3737 int ret;
3739 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3740 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3743 void helper_fucom_ST0_FT0(void)
3745 int ret;
3747 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3748 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3751 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3753 void helper_fcomi_ST0_FT0(void)
3755 int eflags;
3756 int ret;
3758 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3759 eflags = helper_cc_compute_all(CC_OP);
3760 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3761 CC_SRC = eflags;
3764 void helper_fucomi_ST0_FT0(void)
3766 int eflags;
3767 int ret;
3769 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3770 eflags = helper_cc_compute_all(CC_OP);
3771 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3772 CC_SRC = eflags;
3775 void helper_fadd_ST0_FT0(void)
3777 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
3780 void helper_fmul_ST0_FT0(void)
3782 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
3785 void helper_fsub_ST0_FT0(void)
3787 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
3790 void helper_fsubr_ST0_FT0(void)
3792 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
3795 void helper_fdiv_ST0_FT0(void)
3797 ST0 = helper_fdiv(ST0, FT0);
3800 void helper_fdivr_ST0_FT0(void)
3802 ST0 = helper_fdiv(FT0, ST0);
3805 /* fp operations between STN and ST0 */
3807 void helper_fadd_STN_ST0(int st_index)
3809 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
3812 void helper_fmul_STN_ST0(int st_index)
3814 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
3817 void helper_fsub_STN_ST0(int st_index)
3819 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
3822 void helper_fsubr_STN_ST0(int st_index)
3824 ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
3827 void helper_fdiv_STN_ST0(int st_index)
3829 floatx80 *p;
3830 p = &ST(st_index);
3831 *p = helper_fdiv(*p, ST0);
3834 void helper_fdivr_STN_ST0(int st_index)
3836 floatx80 *p;
3837 p = &ST(st_index);
3838 *p = helper_fdiv(ST0, *p);
3841 /* misc FPU operations */
3842 void helper_fchs_ST0(void)
3844 ST0 = floatx80_chs(ST0);
3847 void helper_fabs_ST0(void)
3849 ST0 = floatx80_abs(ST0);
3852 void helper_fld1_ST0(void)
3854 ST0 = floatx80_one;
3857 void helper_fldl2t_ST0(void)
3859 ST0 = floatx80_l2t;
3862 void helper_fldl2e_ST0(void)
3864 ST0 = floatx80_l2e;
3867 void helper_fldpi_ST0(void)
3869 ST0 = floatx80_pi;
3872 void helper_fldlg2_ST0(void)
3874 ST0 = floatx80_lg2;
3877 void helper_fldln2_ST0(void)
3879 ST0 = floatx80_ln2;
3882 void helper_fldz_ST0(void)
3884 ST0 = floatx80_zero;
3887 void helper_fldz_FT0(void)
3889 FT0 = floatx80_zero;
3892 uint32_t helper_fnstsw(void)
3894 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3897 uint32_t helper_fnstcw(void)
3899 return env->fpuc;
3902 static void update_fp_status(void)
3904 int rnd_type;
3906 /* set rounding mode */
3907 switch(env->fpuc & RC_MASK) {
3908 default:
3909 case RC_NEAR:
3910 rnd_type = float_round_nearest_even;
3911 break;
3912 case RC_DOWN:
3913 rnd_type = float_round_down;
3914 break;
3915 case RC_UP:
3916 rnd_type = float_round_up;
3917 break;
3918 case RC_CHOP:
3919 rnd_type = float_round_to_zero;
3920 break;
3922 set_float_rounding_mode(rnd_type, &env->fp_status);
3923 switch((env->fpuc >> 8) & 3) {
3924 case 0:
3925 rnd_type = 32;
3926 break;
3927 case 2:
3928 rnd_type = 64;
3929 break;
3930 case 3:
3931 default:
3932 rnd_type = 80;
3933 break;
3935 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3938 void helper_fldcw(uint32_t val)
3940 env->fpuc = val;
3941 update_fp_status();
3944 void helper_fclex(void)
3946 env->fpus &= 0x7f00;
3949 void helper_fwait(void)
3951 if (env->fpus & FPUS_SE)
3952 fpu_raise_exception();
3955 void helper_fninit(void)
3957 env->fpus = 0;
3958 env->fpstt = 0;
3959 env->fpuc = 0x37f;
3960 env->fptags[0] = 1;
3961 env->fptags[1] = 1;
3962 env->fptags[2] = 1;
3963 env->fptags[3] = 1;
3964 env->fptags[4] = 1;
3965 env->fptags[5] = 1;
3966 env->fptags[6] = 1;
3967 env->fptags[7] = 1;
3970 /* BCD ops */
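/* FBLD/FBST use the 10-byte packed BCD format: 18 decimal digits in bytes 0-8
   and the sign in bit 7 of byte 9 */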
3972 void helper_fbld_ST0(target_ulong ptr)
3974 floatx80 tmp;
3975 uint64_t val;
3976 unsigned int v;
3977 int i;
3979 val = 0;
3980 for(i = 8; i >= 0; i--) {
3981 v = ldub(ptr + i);
3982 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3984 tmp = int64_to_floatx80(val, &env->fp_status);
3985 if (ldub(ptr + 9) & 0x80) {
3986 tmp = floatx80_chs(tmp);
3988 fpush();
3989 ST0 = tmp;
3992 void helper_fbst_ST0(target_ulong ptr)
3994 int v;
3995 target_ulong mem_ref, mem_end;
3996 int64_t val;
3998 val = floatx80_to_int64(ST0, &env->fp_status);
3999 mem_ref = ptr;
4000 mem_end = mem_ref + 9;
4001 if (val < 0) {
4002 stb(mem_end, 0x80);
4003 val = -val;
4004 } else {
4005 stb(mem_end, 0x00);
4007 while (mem_ref < mem_end) {
4008 if (val == 0)
4009 break;
4010 v = val % 100;
4011 val = val / 100;
4012 v = ((v / 10) << 4) | (v % 10);
4013 stb(mem_ref++, v);
4015 while (mem_ref < mem_end) {
4016 stb(mem_ref++, 0);
4020 void helper_f2xm1(void)
4022 double val = floatx80_to_double(ST0);
4023 val = pow(2.0, val) - 1.0;
4024 ST0 = double_to_floatx80(val);
4027 void helper_fyl2x(void)
4029 double fptemp = floatx80_to_double(ST0);
4031 if (fptemp > 0.0) {
4032 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4033 fptemp *= floatx80_to_double(ST1);
4034 ST1 = double_to_floatx80(fptemp);
4035 fpop();
4036 } else {
4037 env->fpus &= (~0x4700);
4038 env->fpus |= 0x400;
4042 void helper_fptan(void)
4044 double fptemp = floatx80_to_double(ST0);
4046 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4047 env->fpus |= 0x400;
4048 } else {
4049 fptemp = tan(fptemp);
4050 ST0 = double_to_floatx80(fptemp);
4051 fpush();
4052 ST0 = floatx80_one;
4053 env->fpus &= (~0x400); /* C2 <-- 0 */
4054 /* the above code is for |arg| < 2**52 only */
4058 void helper_fpatan(void)
4060 double fptemp, fpsrcop;
4062 fpsrcop = floatx80_to_double(ST1);
4063 fptemp = floatx80_to_double(ST0);
4064 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
4065 fpop();
4068 void helper_fxtract(void)
4070 CPU_LDoubleU temp;
4072 temp.d = ST0;
4074 if (floatx80_is_zero(ST0)) {
4075 /* Easy way to generate -inf and raising division by 0 exception */
4076 ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
4077 fpush();
4078 ST0 = temp.d;
4079 } else {
4080 int expdif;
4082 expdif = EXPD(temp) - EXPBIAS;
4083 /*DP exponent bias*/
4084 ST0 = int32_to_floatx80(expdif, &env->fp_status);
4085 fpush();
4086 BIASEXPONENT(temp);
4087 ST0 = temp.d;
4091 void helper_fprem1(void)
4093 double st0, st1, dblq, fpsrcop, fptemp;
4094 CPU_LDoubleU fpsrcop1, fptemp1;
4095 int expdif;
4096 signed long long int q;
4098 st0 = floatx80_to_double(ST0);
4099 st1 = floatx80_to_double(ST1);
4101 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4102 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4103 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4104 return;
4107 fpsrcop = st0;
4108 fptemp = st1;
4109 fpsrcop1.d = ST0;
4110 fptemp1.d = ST1;
4111 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4113 if (expdif < 0) {
4114 /* optimisation? taken from the AMD docs */
4115 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4116 /* ST0 is unchanged */
4117 return;
4120 if (expdif < 53) {
4121 dblq = fpsrcop / fptemp;
4122 /* round dblq towards nearest integer */
4123 dblq = rint(dblq);
4124 st0 = fpsrcop - fptemp * dblq;
4126 /* convert dblq to q by truncating towards zero */
4127 if (dblq < 0.0)
4128 q = (signed long long int)(-dblq);
4129 else
4130 q = (signed long long int)dblq;
4132 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4133 /* (C0,C3,C1) <-- (q2,q1,q0) */
4134 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4135 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4136 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4137 } else {
4138 env->fpus |= 0x400; /* C2 <-- 1 */
4139 fptemp = pow(2.0, expdif - 50);
4140 fpsrcop = (st0 / st1) / fptemp;
4141 /* fpsrcop = integer obtained by chopping */
4142 fpsrcop = (fpsrcop < 0.0) ?
4143 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4144 st0 -= (st1 * fpsrcop * fptemp);
4146 ST0 = double_to_floatx80(st0);
4149 void helper_fprem(void)
4151 double st0, st1, dblq, fpsrcop, fptemp;
4152 CPU_LDoubleU fpsrcop1, fptemp1;
4153 int expdif;
4154 signed long long int q;
4156 st0 = floatx80_to_double(ST0);
4157 st1 = floatx80_to_double(ST1);
4159 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4160 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4161 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4162 return;
4165 fpsrcop = st0;
4166 fptemp = st1;
4167 fpsrcop1.d = ST0;
4168 fptemp1.d = ST1;
4169 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4171 if (expdif < 0) {
4172 /* optimisation? taken from the AMD docs */
4173 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4174 /* ST0 is unchanged */
4175 return;
4178 if (expdif < 53) {
4179 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4180 /* round dblq towards zero */
4181 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4182 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
4184 /* convert dblq to q by truncating towards zero */
4185 if (dblq < 0.0)
4186 q = (signed long long int)(-dblq);
4187 else
4188 q = (signed long long int)dblq;
4190 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4191 /* (C0,C3,C1) <-- (q2,q1,q0) */
4192 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4193 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4194 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4195 } else {
4196 int N = 32 + (expdif % 32); /* as per AMD docs */
4197 env->fpus |= 0x400; /* C2 <-- 1 */
4198 fptemp = pow(2.0, (double)(expdif - N));
4199 fpsrcop = (st0 / st1) / fptemp;
4200 /* fpsrcop = integer obtained by chopping */
4201 fpsrcop = (fpsrcop < 0.0) ?
4202 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4203 st0 -= (st1 * fpsrcop * fptemp);
4205 ST0 = double_to_floatx80(st0);
4208 void helper_fyl2xp1(void)
4210 double fptemp = floatx80_to_double(ST0);
4212 if ((fptemp + 1.0) > 0.0) {
4213 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4214 fptemp *= floatx80_to_double(ST1);
4215 ST1 = double_to_floatx80(fptemp);
4216 fpop();
4217 } else {
4218 env->fpus &= (~0x4700);
4219 env->fpus |= 0x400;
4223 void helper_fsqrt(void)
4225 if (floatx80_is_neg(ST0)) {
4226 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4227 env->fpus |= 0x400;
4229 ST0 = floatx80_sqrt(ST0, &env->fp_status);
4232 void helper_fsincos(void)
4234 double fptemp = floatx80_to_double(ST0);
4236 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4237 env->fpus |= 0x400;
4238 } else {
4239 ST0 = double_to_floatx80(sin(fptemp));
4240 fpush();
4241 ST0 = double_to_floatx80(cos(fptemp));
4242 env->fpus &= (~0x400); /* C2 <-- 0 */
4243 /* the above code is for |arg| < 2**63 only */
4247 void helper_frndint(void)
4249 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
4252 void helper_fscale(void)
4254 if (floatx80_is_any_nan(ST1)) {
4255 ST0 = ST1;
4256 } else {
4257 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4258 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
4262 void helper_fsin(void)
4264 double fptemp = floatx80_to_double(ST0);
4266 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4267 env->fpus |= 0x400;
4268 } else {
4269 ST0 = double_to_floatx80(sin(fptemp));
4270 env->fpus &= (~0x400); /* C2 <-- 0 */
4271 /* the above code is for |arg| < 2**53 only */
4275 void helper_fcos(void)
4277 double fptemp = floatx80_to_double(ST0);
4279 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4280 env->fpus |= 0x400;
4281 } else {
4282 ST0 = double_to_floatx80(cos(fptemp));
4283 env->fpus &= (~0x400); /* C2 <-- 0 */
4284 /* the above code is for |arg| < 2**63 only */
4288 void helper_fxam_ST0(void)
4290 CPU_LDoubleU temp;
4291 int expdif;
4293 temp.d = ST0;
4295 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4296 if (SIGND(temp))
4297 env->fpus |= 0x200; /* C1 <-- 1 */
4299 /* XXX: test fptags too */
4300 expdif = EXPD(temp);
4301 if (expdif == MAXEXPD) {
4302 if (MANTD(temp) == 0x8000000000000000ULL)
4303 env->fpus |= 0x500 /*Infinity*/;
4304 else
4305 env->fpus |= 0x100 /*NaN*/;
4306 } else if (expdif == 0) {
4307 if (MANTD(temp) == 0)
4308 env->fpus |= 0x4000 /*Zero*/;
4309 else
4310 env->fpus |= 0x4400 /*Denormal*/;
4311 } else {
4312 env->fpus |= 0x400;
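/* FSTENV image: the 32-bit form stores seven 32-bit slots (control,
   status, tag, instruction pointer, code selector/opcode, operand
   pointer, operand selector); the 16-bit form packs the same fields
   into seven words.  The tag word holds 2 bits per register:
   0 = valid, 1 = zero, 2 = special (NaN/infinity/denormal), 3 = empty. */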
4316 void helper_fstenv(target_ulong ptr, int data32)
4318 int fpus, fptag, exp, i;
4319 uint64_t mant;
4320 CPU_LDoubleU tmp;
4322 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4323 fptag = 0;
4324 for (i=7; i>=0; i--) {
4325 fptag <<= 2;
4326 if (env->fptags[i]) {
4327 fptag |= 3;
4328 } else {
4329 tmp.d = env->fpregs[i].d;
4330 exp = EXPD(tmp);
4331 mant = MANTD(tmp);
4332 if (exp == 0 && mant == 0) {
4333 /* zero */
4334 fptag |= 1;
4335 } else if (exp == 0 || exp == MAXEXPD
4336 || (mant & (1LL << 63)) == 0) {
4338 /* NaNs, infinity, denormal */
4339 fptag |= 2;
4343 if (data32) {
4344 /* 32 bit */
4345 stl(ptr, env->fpuc);
4346 stl(ptr + 4, fpus);
4347 stl(ptr + 8, fptag);
4348 stl(ptr + 12, 0); /* fpip */
4349 stl(ptr + 16, 0); /* fpcs */
4350 stl(ptr + 20, 0); /* fpoo */
4351 stl(ptr + 24, 0); /* fpos */
4352 } else {
4353 /* 16 bit */
4354 stw(ptr, env->fpuc);
4355 stw(ptr + 2, fpus);
4356 stw(ptr + 4, fptag);
4357 stw(ptr + 6, 0);
4358 stw(ptr + 8, 0);
4359 stw(ptr + 10, 0);
4360 stw(ptr + 12, 0);
4364 void helper_fldenv(target_ulong ptr, int data32)
4366 int i, fpus, fptag;
4368 if (data32) {
4369 env->fpuc = lduw(ptr);
4370 fpus = lduw(ptr + 4);
4371 fptag = lduw(ptr + 8);
4373 else {
4374 env->fpuc = lduw(ptr);
4375 fpus = lduw(ptr + 2);
4376 fptag = lduw(ptr + 4);
4378 env->fpstt = (fpus >> 11) & 7;
4379 env->fpus = fpus & ~0x3800;
4380 for(i = 0;i < 8; i++) {
4381 env->fptags[i] = ((fptag & 3) == 3);
4382 fptag >>= 2;
4386 void helper_fsave(target_ulong ptr, int data32)
4388 floatx80 tmp;
4389 int i;
4391 helper_fstenv(ptr, data32);
4393 ptr += (14 << data32);
4394 for(i = 0;i < 8; i++) {
4395 tmp = ST(i);
4396 helper_fstt(tmp, ptr);
4397 ptr += 10;
4400 /* fninit */
4401 env->fpus = 0;
4402 env->fpstt = 0;
4403 env->fpuc = 0x37f;
4404 env->fptags[0] = 1;
4405 env->fptags[1] = 1;
4406 env->fptags[2] = 1;
4407 env->fptags[3] = 1;
4408 env->fptags[4] = 1;
4409 env->fptags[5] = 1;
4410 env->fptags[6] = 1;
4411 env->fptags[7] = 1;
4414 void helper_frstor(target_ulong ptr, int data32)
4416 floatx80 tmp;
4417 int i;
4419 helper_fldenv(ptr, data32);
4420 ptr += (14 << data32);
4422 for(i = 0;i < 8; i++) {
4423 tmp = helper_fldt(ptr);
4424 ST(i) = tmp;
4425 ptr += 10;
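/* FXSAVE image (512 bytes, 16-byte aligned): FCW/FSW/abridged tag at
   offset 0, MXCSR at +0x18, ST0..ST7 in 16-byte slots from +0x20 and
   the XMM registers from +0xa0.  The abridged tag keeps one bit per
   register (1 = in use), hence the fptag ^ 0xff below. */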
4429 void helper_fxsave(target_ulong ptr, int data64)
4431 int fpus, fptag, i, nb_xmm_regs;
4432 floatx80 tmp;
4433 target_ulong addr;
4435 /* The operand must be 16 byte aligned */
4436 if (ptr & 0xf) {
4437 raise_exception(EXCP0D_GPF);
4440 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4441 fptag = 0;
4442 for(i = 0; i < 8; i++) {
4443 fptag |= (env->fptags[i] << i);
4445 stw(ptr, env->fpuc);
4446 stw(ptr + 2, fpus);
4447 stw(ptr + 4, fptag ^ 0xff);
4448 #ifdef TARGET_X86_64
4449 if (data64) {
4450 stq(ptr + 0x08, 0); /* rip */
4451 stq(ptr + 0x10, 0); /* rdp */
4452 } else
4453 #endif
4455 stl(ptr + 0x08, 0); /* eip */
4456 stl(ptr + 0x0c, 0); /* sel */
4457 stl(ptr + 0x10, 0); /* dp */
4458 stl(ptr + 0x14, 0); /* sel */
4461 addr = ptr + 0x20;
4462 for(i = 0;i < 8; i++) {
4463 tmp = ST(i);
4464 helper_fstt(tmp, addr);
4465 addr += 16;
4468 if (env->cr[4] & CR4_OSFXSR_MASK) {
4469 /* XXX: finish it */
4470 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4471 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4472 if (env->hflags & HF_CS64_MASK)
4473 nb_xmm_regs = 16;
4474 else
4475 nb_xmm_regs = 8;
4476 addr = ptr + 0xa0;
4477 /* Fast FXSAVE leaves out the XMM registers */
4478 if (!(env->efer & MSR_EFER_FFXSR)
4479 || (env->hflags & HF_CPL_MASK)
4480 || !(env->hflags & HF_LMA_MASK)) {
4481 for(i = 0; i < nb_xmm_regs; i++) {
4482 stq(addr, env->xmm_regs[i].XMM_Q(0));
4483 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4484 addr += 16;
4490 void helper_fxrstor(target_ulong ptr, int data64)
4492 int i, fpus, fptag, nb_xmm_regs;
4493 floatx80 tmp;
4494 target_ulong addr;
4496 /* The operand must be 16 byte aligned */
4497 if (ptr & 0xf) {
4498 raise_exception(EXCP0D_GPF);
4501 env->fpuc = lduw(ptr);
4502 fpus = lduw(ptr + 2);
4503 fptag = lduw(ptr + 4);
4504 env->fpstt = (fpus >> 11) & 7;
4505 env->fpus = fpus & ~0x3800;
4506 fptag ^= 0xff;
4507 for(i = 0;i < 8; i++) {
4508 env->fptags[i] = ((fptag >> i) & 1);
4511 addr = ptr + 0x20;
4512 for(i = 0;i < 8; i++) {
4513 tmp = helper_fldt(addr);
4514 ST(i) = tmp;
4515 addr += 16;
4518 if (env->cr[4] & CR4_OSFXSR_MASK) {
4519 /* XXX: finish it */
4520 env->mxcsr = ldl(ptr + 0x18);
4521 //ldl(ptr + 0x1c);
4522 if (env->hflags & HF_CS64_MASK)
4523 nb_xmm_regs = 16;
4524 else
4525 nb_xmm_regs = 8;
4526 addr = ptr + 0xa0;
4527 /* Fast FXRSTOR leaves out the XMM registers */
4528 if (!(env->efer & MSR_EFER_FFXSR)
4529 || (env->hflags & HF_CPL_MASK)
4530 || !(env->hflags & HF_LMA_MASK)) {
4531 for(i = 0; i < nb_xmm_regs; i++) {
4532 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4533 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4534 addr += 16;
4540 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
4542 CPU_LDoubleU temp;
4544 temp.d = f;
4545 *pmant = temp.l.lower;
4546 *pexp = temp.l.upper;
4549 floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
4551 CPU_LDoubleU temp;
4553 temp.l.upper = upper;
4554 temp.l.lower = mant;
4555 return temp.d;
4558 #ifdef TARGET_X86_64
4560 //#define DEBUG_MULDIV
4562 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4564 *plow += a;
4565 /* carry test */
4566 if (*plow < a)
4567 (*phigh)++;
4568 *phigh += b;
4571 static void neg128(uint64_t *plow, uint64_t *phigh)
4573 *plow = ~ *plow;
4574 *phigh = ~ *phigh;
4575 add128(plow, phigh, 1, 0);
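/* div64: divides the 128-bit value *phigh:*plow by b, leaving the
   quotient in *plow and the remainder in *phigh.  When the high half
   is non-zero a simple bit-by-bit restoring division is used. */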
4578 /* return TRUE if overflow */
4579 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4581 uint64_t q, r, a1, a0;
4582 int i, qb, ab;
4584 a0 = *plow;
4585 a1 = *phigh;
4586 if (a1 == 0) {
4587 q = a0 / b;
4588 r = a0 % b;
4589 *plow = q;
4590 *phigh = r;
4591 } else {
4592 if (a1 >= b)
4593 return 1;
4594 /* XXX: use a better algorithm */
4595 for(i = 0; i < 64; i++) {
4596 ab = a1 >> 63;
4597 a1 = (a1 << 1) | (a0 >> 63);
4598 if (ab || a1 >= b) {
4599 a1 -= b;
4600 qb = 1;
4601 } else {
4602 qb = 0;
4604 a0 = (a0 << 1) | qb;
4606 #if defined(DEBUG_MULDIV)
4607 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4608 *phigh, *plow, b, a0, a1);
4609 #endif
4610 *plow = a0;
4611 *phigh = a1;
4613 return 0;
4616 /* return TRUE if overflow */
4617 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4619 int sa, sb;
4620 sa = ((int64_t)*phigh < 0);
4621 if (sa)
4622 neg128(plow, phigh);
4623 sb = (b < 0);
4624 if (sb)
4625 b = -b;
4626 if (div64(plow, phigh, b) != 0)
4627 return 1;
4628 if (sa ^ sb) {
4629 if (*plow > (1ULL << 63))
4630 return 1;
4631 *plow = - *plow;
4632 } else {
4633 if (*plow >= (1ULL << 63))
4634 return 1;
4636 if (sa)
4637 *phigh = - *phigh;
4638 return 0;
4641 void helper_mulq_EAX_T0(target_ulong t0)
4643 uint64_t r0, r1;
4645 mulu64(&r0, &r1, EAX, t0);
4646 EAX = r0;
4647 EDX = r1;
4648 CC_DST = r0;
4649 CC_SRC = r1;
4652 void helper_imulq_EAX_T0(target_ulong t0)
4654 uint64_t r0, r1;
4656 muls64(&r0, &r1, EAX, t0);
4657 EAX = r0;
4658 EDX = r1;
4659 CC_DST = r0;
4660 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4663 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4665 uint64_t r0, r1;
4667 muls64(&r0, &r1, t0, t1);
4668 CC_DST = r0;
4669 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4670 return r0;
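/* 64-bit DIV/IDIV: the 128-bit dividend RDX:RAX is divided by t0,
   the quotient goes to RAX and the remainder to RDX; #DE is raised
   for a zero divisor or when the quotient does not fit in 64 bits. */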
4673 void helper_divq_EAX(target_ulong t0)
4675 uint64_t r0, r1;
4676 if (t0 == 0) {
4677 raise_exception(EXCP00_DIVZ);
4679 r0 = EAX;
4680 r1 = EDX;
4681 if (div64(&r0, &r1, t0))
4682 raise_exception(EXCP00_DIVZ);
4683 EAX = r0;
4684 EDX = r1;
4687 void helper_idivq_EAX(target_ulong t0)
4689 uint64_t r0, r1;
4690 if (t0 == 0) {
4691 raise_exception(EXCP00_DIVZ);
4693 r0 = EAX;
4694 r1 = EDX;
4695 if (idiv64(&r0, &r1, t0))
4696 raise_exception(EXCP00_DIVZ);
4697 EAX = r0;
4698 EDX = r1;
4700 #endif
4702 static void do_hlt(void)
4704 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4705 env->halted = 1;
4706 env->exception_index = EXCP_HLT;
4707 cpu_loop_exit(env);
4710 void helper_hlt(int next_eip_addend)
4712 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4713 EIP += next_eip_addend;
4715 do_hlt();
4718 void helper_monitor(target_ulong ptr)
4720 if ((uint32_t)ECX != 0)
4721 raise_exception(EXCP0D_GPF);
4722 /* XXX: store address ? */
4723 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4726 void helper_mwait(int next_eip_addend)
4728 if ((uint32_t)ECX != 0)
4729 raise_exception(EXCP0D_GPF);
4730 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4731 EIP += next_eip_addend;
4733 /* XXX: not complete but not completely erroneous */
4734 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4735 /* more than one CPU: do not sleep because another CPU may
4736 wake this one */
4737 } else {
4738 do_hlt();
4742 void helper_debug(void)
4744 env->exception_index = EXCP_DEBUG;
4745 cpu_loop_exit(env);
4748 void helper_reset_rf(void)
4750 env->eflags &= ~RF_MASK;
4753 void helper_raise_interrupt(int intno, int next_eip_addend)
4755 raise_interrupt(intno, 1, 0, next_eip_addend);
4758 void helper_raise_exception(int exception_index)
4760 raise_exception(exception_index);
4763 void helper_cli(void)
4765 env->eflags &= ~IF_MASK;
4768 void helper_sti(void)
4770 env->eflags |= IF_MASK;
4773 #if 0
4774 /* vm86plus instructions */
4775 void helper_cli_vm(void)
4777 env->eflags &= ~VIF_MASK;
4780 void helper_sti_vm(void)
4782 env->eflags |= VIF_MASK;
4783 if (env->eflags & VIP_MASK) {
4784 raise_exception(EXCP0D_GPF);
4787 #endif
4789 void helper_set_inhibit_irq(void)
4791 env->hflags |= HF_INHIBIT_IRQ_MASK;
4794 void helper_reset_inhibit_irq(void)
4796 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
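/* BOUND: v is the sign-extended array index and a0 points to the
   signed lower/upper bounds in memory; #BR (EXCP05_BOUND) is raised
   when the index falls outside [low, high]. */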
4799 void helper_boundw(target_ulong a0, int v)
4801 int low, high;
4802 low = ldsw(a0);
4803 high = ldsw(a0 + 2);
4804 v = (int16_t)v;
4805 if (v < low || v > high) {
4806 raise_exception(EXCP05_BOUND);
4810 void helper_boundl(target_ulong a0, int v)
4812 int low, high;
4813 low = ldl(a0);
4814 high = ldl(a0 + 4);
4815 if (v < low || v > high) {
4816 raise_exception(EXCP05_BOUND);
4820 #if !defined(CONFIG_USER_ONLY)
4822 #define MMUSUFFIX _mmu
4824 #define SHIFT 0
4825 #include "softmmu_template.h"
4827 #define SHIFT 1
4828 #include "softmmu_template.h"
4830 #define SHIFT 2
4831 #include "softmmu_template.h"
4833 #define SHIFT 3
4834 #include "softmmu_template.h"
4836 #endif
4838 #if !defined(CONFIG_USER_ONLY)
4839 /* try to fill the TLB and raise an exception on error. If retaddr is
4840 NULL, the function was called from C code (i.e. not
4841 from generated code or from helper.c) */
4842 /* XXX: fix it to restore all registers */
4843 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4845 TranslationBlock *tb;
4846 int ret;
4847 unsigned long pc;
4848 CPUX86State *saved_env;
4850 /* XXX: hack to restore env in all cases, even if not called from
4851 generated code */
4852 saved_env = env;
4853 env = cpu_single_env;
4855 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4856 if (ret) {
4857 if (retaddr) {
4858 /* now we have a real cpu fault */
4859 pc = (unsigned long)retaddr;
4860 tb = tb_find_pc(pc);
4861 if (tb) {
4862 /* the PC is inside the translated code. It means that we have
4863 a virtual CPU fault */
4864 cpu_restore_state(tb, env, pc);
4867 raise_exception_err(env->exception_index, env->error_code);
4869 env = saved_env;
4871 #endif
4873 /* Secure Virtual Machine helpers */
4875 #if defined(CONFIG_USER_ONLY)
4877 void helper_vmrun(int aflag, int next_eip_addend)
4880 void helper_vmmcall(void)
4883 void helper_vmload(int aflag)
4886 void helper_vmsave(int aflag)
4889 void helper_stgi(void)
4892 void helper_clgi(void)
4895 void helper_skinit(void)
4898 void helper_invlpga(int aflag)
4901 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4904 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4908 void svm_check_intercept(CPUState *env1, uint32_t type)
4912 void helper_svm_check_io(uint32_t port, uint32_t param,
4913 uint32_t next_eip_addend)
4916 #else
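/* The VMCB stores segment attributes in a packed 12-bit form, so
   svm_save_seg compresses QEMU's descriptor-style flags with the
   shifts below and svm_load_seg expands them again. */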
4918 static inline void svm_save_seg(target_phys_addr_t addr,
4919 const SegmentCache *sc)
4921 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4922 sc->selector);
4923 stq_phys(addr + offsetof(struct vmcb_seg, base),
4924 sc->base);
4925 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4926 sc->limit);
4927 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4928 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4931 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4933 unsigned int flags;
4935 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4936 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4937 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4938 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4939 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4942 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4943 CPUState *env, int seg_reg)
4945 SegmentCache sc1, *sc = &sc1;
4946 svm_load_seg(addr, sc);
4947 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4948 sc->base, sc->limit, sc->flags);
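/* VMRUN: save the host context into the hsave page, cache the
   intercept bitmaps in env so guest-mode checks avoid VMCB accesses,
   load the guest state from the VMCB at rAX and finally inject any
   event pending in event_inj. */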
4951 void helper_vmrun(int aflag, int next_eip_addend)
4953 target_ulong addr;
4954 uint32_t event_inj;
4955 uint32_t int_ctl;
4957 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4959 if (aflag == 2)
4960 addr = EAX;
4961 else
4962 addr = (uint32_t)EAX;
4964 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4966 env->vm_vmcb = addr;
4968 /* save the current CPU state in the hsave page */
4969 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4970 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4972 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4973 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4975 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4976 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4977 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4978 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4979 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4980 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4982 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4983 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4985 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4986 &env->segs[R_ES]);
4987 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4988 &env->segs[R_CS]);
4989 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4990 &env->segs[R_SS]);
4991 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4992 &env->segs[R_DS]);
4994 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4995 EIP + next_eip_addend);
4996 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4997 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4999 /* load the interception bitmaps so we do not need to access the
5000 vmcb in svm mode */
5001 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
5002 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5003 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5004 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5005 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5006 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5008 /* enable intercepts */
5009 env->hflags |= HF_SVMI_MASK;
5011 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5013 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5014 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5016 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5017 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5019 /* clear exit_info_2 so we behave like the real hardware */
5020 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5022 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5023 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5024 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5025 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5026 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5027 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5028 if (int_ctl & V_INTR_MASKING_MASK) {
5029 env->v_tpr = int_ctl & V_TPR_MASK;
5030 env->hflags2 |= HF2_VINTR_MASK;
5031 if (env->eflags & IF_MASK)
5032 env->hflags2 |= HF2_HIF_MASK;
5035 cpu_load_efer(env,
5036 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5037 env->eflags = 0;
5038 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5039 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5040 CC_OP = CC_OP_EFLAGS;
5042 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5043 env, R_ES);
5044 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5045 env, R_CS);
5046 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5047 env, R_SS);
5048 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5049 env, R_DS);
5051 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5052 env->eip = EIP;
5053 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5054 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5055 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5056 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5057 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5059 /* FIXME: guest state consistency checks */
5061 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5062 case TLB_CONTROL_DO_NOTHING:
5063 break;
5064 case TLB_CONTROL_FLUSH_ALL_ASID:
5065 /* FIXME: this is not 100% correct but should work for now */
5066 tlb_flush(env, 1);
5067 break;
5070 env->hflags2 |= HF2_GIF_MASK;
5072 if (int_ctl & V_IRQ_MASK) {
5073 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5076 /* maybe we need to inject an event */
5077 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5078 if (event_inj & SVM_EVTINJ_VALID) {
5079 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5080 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5081 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5083 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5084 /* FIXME: need to implement valid_err */
5085 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5086 case SVM_EVTINJ_TYPE_INTR:
5087 env->exception_index = vector;
5088 env->error_code = event_inj_err;
5089 env->exception_is_int = 0;
5090 env->exception_next_eip = -1;
5091 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5092 /* XXX: is it always correct ? */
5093 do_interrupt_all(vector, 0, 0, 0, 1);
5094 break;
5095 case SVM_EVTINJ_TYPE_NMI:
5096 env->exception_index = EXCP02_NMI;
5097 env->error_code = event_inj_err;
5098 env->exception_is_int = 0;
5099 env->exception_next_eip = EIP;
5100 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5101 cpu_loop_exit(env);
5102 break;
5103 case SVM_EVTINJ_TYPE_EXEPT:
5104 env->exception_index = vector;
5105 env->error_code = event_inj_err;
5106 env->exception_is_int = 0;
5107 env->exception_next_eip = -1;
5108 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5109 cpu_loop_exit(env);
5110 break;
5111 case SVM_EVTINJ_TYPE_SOFT:
5112 env->exception_index = vector;
5113 env->error_code = event_inj_err;
5114 env->exception_is_int = 1;
5115 env->exception_next_eip = EIP;
5116 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5117 cpu_loop_exit(env);
5118 break;
5120 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5124 void helper_vmmcall(void)
5126 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5127 raise_exception(EXCP06_ILLOP);
5130 void helper_vmload(int aflag)
5132 target_ulong addr;
5133 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5135 if (aflag == 2)
5136 addr = EAX;
5137 else
5138 addr = (uint32_t)EAX;
5140 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5141 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5142 env->segs[R_FS].base);
5144 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5145 env, R_FS);
5146 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5147 env, R_GS);
5148 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5149 &env->tr);
5150 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5151 &env->ldt);
5153 #ifdef TARGET_X86_64
5154 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5155 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5156 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5157 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5158 #endif
5159 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5160 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5161 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5162 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5165 void helper_vmsave(int aflag)
5167 target_ulong addr;
5168 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5170 if (aflag == 2)
5171 addr = EAX;
5172 else
5173 addr = (uint32_t)EAX;
5175 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5176 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5177 env->segs[R_FS].base);
5179 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5180 &env->segs[R_FS]);
5181 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5182 &env->segs[R_GS]);
5183 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5184 &env->tr);
5185 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5186 &env->ldt);
5188 #ifdef TARGET_X86_64
5189 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5190 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5191 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5192 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5193 #endif
5194 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5195 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5196 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5197 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5200 void helper_stgi(void)
5202 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5203 env->hflags2 |= HF2_GIF_MASK;
5206 void helper_clgi(void)
5208 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5209 env->hflags2 &= ~HF2_GIF_MASK;
5212 void helper_skinit(void)
5214 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5215 /* XXX: not implemented */
5216 raise_exception(EXCP06_ILLOP);
5219 void helper_invlpga(int aflag)
5221 target_ulong addr;
5222 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5224 if (aflag == 2)
5225 addr = EAX;
5226 else
5227 addr = (uint32_t)EAX;
5229 /* XXX: could use the ASID to see whether the flush is
5230 needed */
5231 tlb_flush_page(env, addr);
5234 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5236 if (likely(!(env->hflags & HF_SVMI_MASK)))
5237 return;
5238 switch(type) {
5239 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5240 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5241 helper_vmexit(type, param);
5243 break;
5244 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5245 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5246 helper_vmexit(type, param);
5248 break;
5249 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5250 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5251 helper_vmexit(type, param);
5253 break;
5254 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5255 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5256 helper_vmexit(type, param);
5258 break;
5259 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5260 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5261 helper_vmexit(type, param);
5263 break;
5264 case SVM_EXIT_MSR:
5265 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5266 /* FIXME: this should be read in at vmrun (faster this way?) */
5267 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5268 uint32_t t0, t1;
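/* The MSR permission bitmap holds 2 bits per MSR (read intercept,
   then write intercept) in three 2K regions covering 0..0x1fff,
   0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff; t1 is the byte
   offset into the map, t0 the bit offset within that byte, and param
   selects the read (0) or write (1) bit. */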
5269 switch((uint32_t)ECX) {
5270 case 0 ... 0x1fff:
5271 t0 = (ECX * 2) % 8;
5272 t1 = (ECX * 2) / 8;
5273 break;
5274 case 0xc0000000 ... 0xc0001fff:
5275 t0 = (8192 + ECX - 0xc0000000) * 2;
5276 t1 = (t0 / 8);
5277 t0 %= 8;
5278 break;
5279 case 0xc0010000 ... 0xc0011fff:
5280 t0 = (16384 + ECX - 0xc0010000) * 2;
5281 t1 = (t0 / 8);
5282 t0 %= 8;
5283 break;
5284 default:
5285 helper_vmexit(type, param);
5286 t0 = 0;
5287 t1 = 0;
5288 break;
5290 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5291 helper_vmexit(type, param);
5293 break;
5294 default:
5295 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5296 helper_vmexit(type, param);
5298 break;
5302 void svm_check_intercept(CPUState *env1, uint32_t type)
5304 CPUState *saved_env;
5306 saved_env = env;
5307 env = env1;
5308 helper_svm_check_intercept_param(type, 0);
5309 env = saved_env;
5312 void helper_svm_check_io(uint32_t port, uint32_t param,
5313 uint32_t next_eip_addend)
5315 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5316 /* FIXME: this should be read in at vmrun (faster this way?) */
5317 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5318 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5319 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5320 /* next EIP */
5321 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5322 env->eip + next_eip_addend);
5323 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
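/* #VMEXIT: write the guest state, exit_code/exit_info_1 and any
   pending event_inj back into the VMCB, drop the cached intercepts,
   restore the host context from the hsave page and clear GIF. */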
5328 /* Note: currently only 32 bits of exit_code are used */
5329 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5331 uint32_t int_ctl;
5333 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5334 exit_code, exit_info_1,
5335 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5336 EIP);
5338 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5339 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5340 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5341 } else {
5342 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5345 /* Save the VM state in the vmcb */
5346 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5347 &env->segs[R_ES]);
5348 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5349 &env->segs[R_CS]);
5350 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5351 &env->segs[R_SS]);
5352 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5353 &env->segs[R_DS]);
5355 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5356 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5358 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5359 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5361 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5362 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5363 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5364 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5365 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5367 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5368 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5369 int_ctl |= env->v_tpr & V_TPR_MASK;
5370 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5371 int_ctl |= V_IRQ_MASK;
5372 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5374 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5375 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5376 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5380 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5382 /* Reload the host state from vm_hsave */
5383 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5384 env->hflags &= ~HF_SVMI_MASK;
5385 env->intercept = 0;
5386 env->intercept_exceptions = 0;
5387 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5388 env->tsc_offset = 0;
5390 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5391 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5393 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5394 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5396 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5397 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5398 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5399 /* we need to set the efer after the crs so the hidden flags get
5400 set properly */
5401 cpu_load_efer(env,
5402 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5403 env->eflags = 0;
5404 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5405 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5406 CC_OP = CC_OP_EFLAGS;
5408 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5409 env, R_ES);
5410 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5411 env, R_CS);
5412 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5413 env, R_SS);
5414 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5415 env, R_DS);
5417 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5418 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5419 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5421 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5422 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5424 /* other setups */
5425 cpu_x86_set_cpl(env, 0);
5426 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5427 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5429 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5430 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5431 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5432 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5433 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5435 env->hflags2 &= ~HF2_GIF_MASK;
5436 /* FIXME: Resets the current ASID register to zero (host ASID). */
5438 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5440 /* Clears the TSC_OFFSET inside the processor. */
5442 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5443 from the page table indicated by the host's CR3. If the PDPEs contain
5444 illegal state, the processor causes a shutdown. */
5446 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5447 env->cr[0] |= CR0_PE_MASK;
5448 env->eflags &= ~VM_MASK;
5450 /* Disables all breakpoints in the host DR7 register. */
5452 /* Checks the reloaded host state for consistency. */
5454 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5455 host's code segment or non-canonical (in the case of long mode), a
5456 #GP fault is delivered inside the host. */
5458 /* remove any pending exception */
5459 env->exception_index = -1;
5460 env->error_code = 0;
5461 env->old_exception = -1;
5463 cpu_loop_exit(env);
5466 #endif
5468 /* MMX/SSE */
5469 /* XXX: optimize by storing fptt and fptags in the static cpu state */
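/* MMX aliases the x87 register file: enter_mmx marks all eight
   registers as valid (tag 0) and resets the stack top, while EMMS
   marks them all empty (tag 1).  The 32-bit stores below write four
   tag bytes at a time. */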
5470 void helper_enter_mmx(void)
5472 env->fpstt = 0;
5473 *(uint32_t *)(env->fptags) = 0;
5474 *(uint32_t *)(env->fptags + 4) = 0;
5477 void helper_emms(void)
5479 /* set to empty state */
5480 *(uint32_t *)(env->fptags) = 0x01010101;
5481 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5484 /* XXX: suppress */
5485 void helper_movq(void *d, void *s)
5487 *(uint64_t *)d = *(uint64_t *)s;
5490 #define SHIFT 0
5491 #include "ops_sse.h"
5493 #define SHIFT 1
5494 #include "ops_sse.h"
5496 #define SHIFT 0
5497 #include "helper_template.h"
5498 #undef SHIFT
5500 #define SHIFT 1
5501 #include "helper_template.h"
5502 #undef SHIFT
5504 #define SHIFT 2
5505 #include "helper_template.h"
5506 #undef SHIFT
5508 #ifdef TARGET_X86_64
5510 #define SHIFT 3
5511 #include "helper_template.h"
5512 #undef SHIFT
5514 #endif
5516 /* bit operations */
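/* helper_bsf returns the index of the lowest set bit and does not
   terminate for t0 == 0, so callers must rule that case out first.
   helper_lzcnt with wordsize > 0 counts leading zeros within the
   given width (returning wordsize for a zero operand); with
   wordsize == 0 it returns the index of the highest set bit, which
   is what BSR needs. */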
5517 target_ulong helper_bsf(target_ulong t0)
5519 int count;
5520 target_ulong res;
5522 res = t0;
5523 count = 0;
5524 while ((res & 1) == 0) {
5525 count++;
5526 res >>= 1;
5528 return count;
5531 target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5533 int count;
5534 target_ulong res, mask;
5536 if (wordsize > 0 && t0 == 0) {
5537 return wordsize;
5539 res = t0;
5540 count = TARGET_LONG_BITS - 1;
5541 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5542 while ((res & mask) == 0) {
5543 count--;
5544 res <<= 1;
5546 if (wordsize > 0) {
5547 return wordsize - 1 - count;
5549 return count;
5552 target_ulong helper_bsr(target_ulong t0)
5554 return helper_lzcnt(t0, 0);
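/* Lazy condition codes: CC_OP records the last flag-setting operation
   and CC_SRC/CC_DST its inputs/result, so EFLAGS is only materialized
   when one of the compute helpers below is called.  CC_OP_EFLAGS means
   the flags are already explicit in CC_SRC. */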
5557 static int compute_all_eflags(void)
5559 return CC_SRC;
5562 static int compute_c_eflags(void)
5564 return CC_SRC & CC_C;
5567 uint32_t helper_cc_compute_all(int op)
5569 switch (op) {
5570 default: /* should never happen */ return 0;
5572 case CC_OP_EFLAGS: return compute_all_eflags();
5574 case CC_OP_MULB: return compute_all_mulb();
5575 case CC_OP_MULW: return compute_all_mulw();
5576 case CC_OP_MULL: return compute_all_mull();
5578 case CC_OP_ADDB: return compute_all_addb();
5579 case CC_OP_ADDW: return compute_all_addw();
5580 case CC_OP_ADDL: return compute_all_addl();
5582 case CC_OP_ADCB: return compute_all_adcb();
5583 case CC_OP_ADCW: return compute_all_adcw();
5584 case CC_OP_ADCL: return compute_all_adcl();
5586 case CC_OP_SUBB: return compute_all_subb();
5587 case CC_OP_SUBW: return compute_all_subw();
5588 case CC_OP_SUBL: return compute_all_subl();
5590 case CC_OP_SBBB: return compute_all_sbbb();
5591 case CC_OP_SBBW: return compute_all_sbbw();
5592 case CC_OP_SBBL: return compute_all_sbbl();
5594 case CC_OP_LOGICB: return compute_all_logicb();
5595 case CC_OP_LOGICW: return compute_all_logicw();
5596 case CC_OP_LOGICL: return compute_all_logicl();
5598 case CC_OP_INCB: return compute_all_incb();
5599 case CC_OP_INCW: return compute_all_incw();
5600 case CC_OP_INCL: return compute_all_incl();
5602 case CC_OP_DECB: return compute_all_decb();
5603 case CC_OP_DECW: return compute_all_decw();
5604 case CC_OP_DECL: return compute_all_decl();
5606 case CC_OP_SHLB: return compute_all_shlb();
5607 case CC_OP_SHLW: return compute_all_shlw();
5608 case CC_OP_SHLL: return compute_all_shll();
5610 case CC_OP_SARB: return compute_all_sarb();
5611 case CC_OP_SARW: return compute_all_sarw();
5612 case CC_OP_SARL: return compute_all_sarl();
5614 #ifdef TARGET_X86_64
5615 case CC_OP_MULQ: return compute_all_mulq();
5617 case CC_OP_ADDQ: return compute_all_addq();
5619 case CC_OP_ADCQ: return compute_all_adcq();
5621 case CC_OP_SUBQ: return compute_all_subq();
5623 case CC_OP_SBBQ: return compute_all_sbbq();
5625 case CC_OP_LOGICQ: return compute_all_logicq();
5627 case CC_OP_INCQ: return compute_all_incq();
5629 case CC_OP_DECQ: return compute_all_decq();
5631 case CC_OP_SHLQ: return compute_all_shlq();
5633 case CC_OP_SARQ: return compute_all_sarq();
5634 #endif
5638 uint32_t cpu_cc_compute_all(CPUState *env1, int op)
5640 CPUState *saved_env;
5641 uint32_t ret;
5643 saved_env = env;
5644 env = env1;
5645 ret = helper_cc_compute_all(op);
5646 env = saved_env;
5647 return ret;
5650 uint32_t helper_cc_compute_c(int op)
5652 switch (op) {
5653 default: /* should never happen */ return 0;
5655 case CC_OP_EFLAGS: return compute_c_eflags();
5657 case CC_OP_MULB: return compute_c_mull();
5658 case CC_OP_MULW: return compute_c_mull();
5659 case CC_OP_MULL: return compute_c_mull();
5661 case CC_OP_ADDB: return compute_c_addb();
5662 case CC_OP_ADDW: return compute_c_addw();
5663 case CC_OP_ADDL: return compute_c_addl();
5665 case CC_OP_ADCB: return compute_c_adcb();
5666 case CC_OP_ADCW: return compute_c_adcw();
5667 case CC_OP_ADCL: return compute_c_adcl();
5669 case CC_OP_SUBB: return compute_c_subb();
5670 case CC_OP_SUBW: return compute_c_subw();
5671 case CC_OP_SUBL: return compute_c_subl();
5673 case CC_OP_SBBB: return compute_c_sbbb();
5674 case CC_OP_SBBW: return compute_c_sbbw();
5675 case CC_OP_SBBL: return compute_c_sbbl();
5677 case CC_OP_LOGICB: return compute_c_logicb();
5678 case CC_OP_LOGICW: return compute_c_logicw();
5679 case CC_OP_LOGICL: return compute_c_logicl();
5681 case CC_OP_INCB: return compute_c_incl();
5682 case CC_OP_INCW: return compute_c_incl();
5683 case CC_OP_INCL: return compute_c_incl();
5685 case CC_OP_DECB: return compute_c_incl();
5686 case CC_OP_DECW: return compute_c_incl();
5687 case CC_OP_DECL: return compute_c_incl();
5689 case CC_OP_SHLB: return compute_c_shlb();
5690 case CC_OP_SHLW: return compute_c_shlw();
5691 case CC_OP_SHLL: return compute_c_shll();
5693 case CC_OP_SARB: return compute_c_sarl();
5694 case CC_OP_SARW: return compute_c_sarl();
5695 case CC_OP_SARL: return compute_c_sarl();
5697 #ifdef TARGET_X86_64
5698 case CC_OP_MULQ: return compute_c_mull();
5700 case CC_OP_ADDQ: return compute_c_addq();
5702 case CC_OP_ADCQ: return compute_c_adcq();
5704 case CC_OP_SUBQ: return compute_c_subq();
5706 case CC_OP_SBBQ: return compute_c_sbbq();
5708 case CC_OP_LOGICQ: return compute_c_logicq();
5710 case CC_OP_INCQ: return compute_c_incl();
5712 case CC_OP_DECQ: return compute_c_incl();
5714 case CC_OP_SHLQ: return compute_c_shlq();
5716 case CC_OP_SARQ: return compute_c_sarl();
5717 #endif