target-i386/op_helper.c
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
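
/* PF reflects the parity of the low byte of a result: it is set when
   that byte contains an even number of 1 bits.  The table below
   precomputes CC_P for every byte value, e.g. 0x00 (zero set bits)
   and 0x03 (two set bits) map to CC_P, while 0x01 maps to 0. */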
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
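
/* RCL rotates through CF, so a 16-bit rotate really cycles over 17
   bit positions (16 data bits plus the carry) and an 8-bit rotate
   over 9.  These tables reduce the 5-bit shift count modulo 17
   (rclw_table) or modulo 9 (rclb_table). */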
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
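
/* These are the seven constants pushed by the x87 load-constant
   opcodes (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); the
   FPU helpers elsewhere in this file index into f15rk by the low
   bits of the opcode. */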
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
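
/* With the granularity bit set, the 20-bit raw limit counts 4 KiB
   pages, so e.g. a raw limit of 0xfffff expands to
   (0xfffff << 12) | 0xfff = 0xffffffff, covering the full 4 GiB. */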
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
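
/* Example: for a 32-bit TSS (shift == 1) and dpl == 0 the index is
   (0 * 4 + 2) << 1 = 4, i.e. ESP0 at offset 4 and SS0 at offset 8,
   matching the hardware TSS layout; 16-bit TSS entries are packed
   at half the stride. */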
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
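
/* The task-switch source matters below: JMP and CALL mark the new
   TSS busy, JMP and IRET clear the old TSS's busy bit, and only CALL
   writes a back link into the new TSS and sets NT so that a later
   IRET can return to the outgoing task. */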
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
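
/* Worked example: IN AL from port 0x3f8 with a bitmap base of 0x68
   (an illustrative value; the real base is the word at tr.base +
   0x66) reads the 16-bit word at tr.base + 0x68 + (0x3f8 >> 3) and
   tests bit (0x3f8 & 7) = 0; the access is allowed only if every
   bit covering the operand is clear. */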
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
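
/* These are exactly the vectors that push an error code on the
   stack: #DF (8), #TS (10), #NP (11), #SS (12), #GP (13), #PF (14)
   and #AC (17). */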
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
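
/* Note the discipline used with these macros throughout this file:
   pushes and pops operate on a local copy of the stack pointer and
   the result is committed with SET_ESP only after all memory
   accesses have succeeded, so a faulting push cannot leave a
   half-updated ESP behind.  SET_ESP itself writes back only the
   bits covered by the current stack-size mask, preserving the upper
   ESP bits on 16-bit stacks. */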
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
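
/* In the 64-bit TSS, RSP0-RSP2 live at offsets 4, 12 and 20 and
   IST1-IST7 start at offset 36, so index = 8 * level + 4 covers
   both cases: plain levels 0-2 map to RSPn, and callers pass
   ist + 3 to reach ISTn (e.g. ist = 1 gives 8 * 4 + 4 = 36 = IST1). */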
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
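
/* Bit 17 (0x20000) of the revision ID advertises SMBASE relocation;
   helper_rsm below tests exactly that bit before accepting a new
   SMBASE from the saved state area. */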
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
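
/* Note that x86 raises the same #DE exception both for division by
   zero and for a quotient that does not fit in the destination,
   which is why the q > 0xff overflow check above also raises
   EXCP00_DIVZ; the wider helpers below follow the same pattern. */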
void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
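
/* Worked example: adding packed BCD 19 + 28 computes 0x19 + 0x28 =
   0x41 with AF set; DAA then adds 6 because AF is set, giving
   AL = 0x47, the correct BCD encoding of 47. */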
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2014 void helper_lldt(int selector)
2016 SegmentCache *dt;
2017 uint32_t e1, e2;
2018 int index, entry_limit;
2019 target_ulong ptr;
2021 selector &= 0xffff;
2022 if ((selector & 0xfffc) == 0) {
2023 /* XXX: NULL selector case: invalid LDT */
2024 env->ldt.base = 0;
2025 env->ldt.limit = 0;
2026 } else {
2027 if (selector & 0x4)
2028 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2029 dt = &env->gdt;
2030 index = selector & ~7;
2031 #ifdef TARGET_X86_64
2032 if (env->hflags & HF_LMA_MASK)
2033 entry_limit = 15;
2034 else
2035 #endif
2036 entry_limit = 7;
2037 if ((index + entry_limit) > dt->limit)
2038 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2039 ptr = dt->base + index;
2040 e1 = ldl_kernel(ptr);
2041 e2 = ldl_kernel(ptr + 4);
2042 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2043 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2044 if (!(e2 & DESC_P_MASK))
2045 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2046 #ifdef TARGET_X86_64
2047 if (env->hflags & HF_LMA_MASK) {
2048 uint32_t e3;
2049 e3 = ldl_kernel(ptr + 8);
2050 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2051 env->ldt.base |= (target_ulong)e3 << 32;
2052 } else
2053 #endif
2055 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2058 env->ldt.selector = selector;
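/*
 * The descriptor words e1/e2 loaded above pack base, limit and flags in
 * the usual scattered x86 layout; get_seg_base()/get_seg_limit() undo
 * that packing. Equivalent standalone sketch, assuming the standard
 * descriptor format (not part of the build):
 */
#if 0
#include <stdint.h>

static uint32_t demo_seg_base(uint32_t e1, uint32_t e2)
{
    /* base: bits 16..31 of word 0, bits 0..7 and 24..31 of word 1 */
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t demo_seg_limit(uint32_t e1, uint32_t e2)
{
    /* 20-bit limit; the G bit (bit 23 of word 1) scales it to 4K pages */
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1u << 23))
        limit = (limit << 12) | 0xfff;
    return limit;
}
#endif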
2061 void helper_ltr(int selector)
2063 SegmentCache *dt;
2064 uint32_t e1, e2;
2065 int index, type, entry_limit;
2066 target_ulong ptr;
2068 selector &= 0xffff;
2069 if ((selector & 0xfffc) == 0) {
2070 /* NULL selector case: invalid TR */
2071 env->tr.base = 0;
2072 env->tr.limit = 0;
2073 env->tr.flags = 0;
2074 } else {
2075 if (selector & 0x4)
2076 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2077 dt = &env->gdt;
2078 index = selector & ~7;
2079 #ifdef TARGET_X86_64
2080 if (env->hflags & HF_LMA_MASK)
2081 entry_limit = 15;
2082 else
2083 #endif
2084 entry_limit = 7;
2085 if ((index + entry_limit) > dt->limit)
2086 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2087 ptr = dt->base + index;
2088 e1 = ldl_kernel(ptr);
2089 e2 = ldl_kernel(ptr + 4);
2090 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2091 if ((e2 & DESC_S_MASK) ||
2092 (type != 1 && type != 9))
2093 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2094 if (!(e2 & DESC_P_MASK))
2095 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2096 #ifdef TARGET_X86_64
2097 if (env->hflags & HF_LMA_MASK) {
2098 uint32_t e3, e4;
2099 e3 = ldl_kernel(ptr + 8);
2100 e4 = ldl_kernel(ptr + 12);
2101 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2102 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2103 load_seg_cache_raw_dt(&env->tr, e1, e2);
2104 env->tr.base |= (target_ulong)e3 << 32;
2105 } else
2106 #endif
2108 load_seg_cache_raw_dt(&env->tr, e1, e2);
2110 e2 |= DESC_TSS_BUSY_MASK;
2111 stl_kernel(ptr + 4, e2);
2113 env->tr.selector = selector;
2116 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2117 void helper_load_seg(int seg_reg, int selector)
2119 uint32_t e1, e2;
2120 int cpl, dpl, rpl;
2121 SegmentCache *dt;
2122 int index;
2123 target_ulong ptr;
2125 selector &= 0xffff;
2126 cpl = env->hflags & HF_CPL_MASK;
2127 if ((selector & 0xfffc) == 0) {
2128 /* null selector case */
2129 if (seg_reg == R_SS
2130 #ifdef TARGET_X86_64
2131 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2132 #endif
2134 raise_exception_err(EXCP0D_GPF, 0);
2135 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2136 } else {
2138 if (selector & 0x4)
2139 dt = &env->ldt;
2140 else
2141 dt = &env->gdt;
2142 index = selector & ~7;
2143 if ((index + 7) > dt->limit)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 ptr = dt->base + index;
2146 e1 = ldl_kernel(ptr);
2147 e2 = ldl_kernel(ptr + 4);
2149 if (!(e2 & DESC_S_MASK))
2150 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2151 rpl = selector & 3;
2152 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2153 if (seg_reg == R_SS) {
2154 /* must be writable segment */
2155 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2156 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2157 if (rpl != cpl || dpl != cpl)
2158 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2159 } else {
2160 /* must be readable segment */
2161 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2162 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2164 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2165 /* if not conforming code, test rights */
2166 if (dpl < cpl || dpl < rpl)
2167 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2171 if (!(e2 & DESC_P_MASK)) {
2172 if (seg_reg == R_SS)
2173 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2174 else
2175 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2178 /* set the access bit if not already set */
2179 if (!(e2 & DESC_A_MASK)) {
2180 e2 |= DESC_A_MASK;
2181 stl_kernel(ptr + 4, e2);
2184 cpu_x86_load_seg_cache(env, seg_reg, selector,
2185 get_seg_base(e1, e2),
2186 get_seg_limit(e1, e2),
2187 e2);
2188 #if 0
2189 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2190 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2191 #endif
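/*
 * Selector anatomy behind the recurring "& ~7", "& 0x4" and "& 3" tests
 * in these helpers: bits 3..15 index the descriptor table, bit 2 (TI)
 * selects the LDT over the GDT, and bits 0..1 carry the RPL; "selector &
 * 0xfffc" is the exception error-code form with the RPL cleared.
 * Illustrative decoder, not part of the build:
 */
#if 0
#include <stdint.h>

struct demo_sel {
    unsigned index;     /* descriptor number; index * 8 = byte offset */
    unsigned ti;        /* 0 = GDT, 1 = LDT */
    unsigned rpl;       /* requested privilege level */
};

static struct demo_sel demo_decode_selector(uint16_t sel)
{
    struct demo_sel s = { sel >> 3, (sel >> 2) & 1, sel & 3 };
    return s;
}
#endif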
2195 /* protected mode jump */
2196 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2197 int next_eip_addend)
2199 int gate_cs, type;
2200 uint32_t e1, e2, cpl, dpl, rpl, limit;
2201 target_ulong next_eip;
2203 if ((new_cs & 0xfffc) == 0)
2204 raise_exception_err(EXCP0D_GPF, 0);
2205 if (load_segment(&e1, &e2, new_cs) != 0)
2206 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2207 cpl = env->hflags & HF_CPL_MASK;
2208 if (e2 & DESC_S_MASK) {
2209 if (!(e2 & DESC_CS_MASK))
2210 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2211 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2212 if (e2 & DESC_C_MASK) {
2213 /* conforming code segment */
2214 if (dpl > cpl)
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 } else {
2217 /* non conforming code segment */
2218 rpl = new_cs & 3;
2219 if (rpl > cpl)
2220 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2221 if (dpl != cpl)
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2224 if (!(e2 & DESC_P_MASK))
2225 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2226 limit = get_seg_limit(e1, e2);
2227 if (new_eip > limit &&
2228 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2229 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2230 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2231 get_seg_base(e1, e2), limit, e2);
2232 EIP = new_eip;
2233 } else {
2234 /* jump to call or task gate */
2235 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2236 rpl = new_cs & 3;
2237 cpl = env->hflags & HF_CPL_MASK;
2238 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2239 switch(type) {
2240 case 1: /* 286 TSS */
2241 case 9: /* 386 TSS */
2242 case 5: /* task gate */
2243 if (dpl < cpl || dpl < rpl)
2244 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2245 next_eip = env->eip + next_eip_addend;
2246 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2247 CC_OP = CC_OP_EFLAGS;
2248 break;
2249 case 4: /* 286 call gate */
2250 case 12: /* 386 call gate */
2251 if ((dpl < cpl) || (dpl < rpl))
2252 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2253 if (!(e2 & DESC_P_MASK))
2254 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2255 gate_cs = e1 >> 16;
2256 new_eip = (e1 & 0xffff);
2257 if (type == 12)
2258 new_eip |= (e2 & 0xffff0000);
2259 if (load_segment(&e1, &e2, gate_cs) != 0)
2260 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2261 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2262 /* must be code segment */
2263 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2264 (DESC_S_MASK | DESC_CS_MASK)))
2265 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2266 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2267 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2268 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2269 if (!(e2 & DESC_P_MASK))
2270 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2271 limit = get_seg_limit(e1, e2);
2272 if (new_eip > limit)
2273 raise_exception_err(EXCP0D_GPF, 0);
2274 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2275 get_seg_base(e1, e2), limit, e2);
2276 EIP = new_eip;
2277 break;
2278 default:
2279 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2280 break;
2285 /* real mode call */
2286 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2287 int shift, int next_eip)
2289 int new_eip;
2290 uint32_t esp, esp_mask;
2291 target_ulong ssp;
2293 new_eip = new_eip1;
2294 esp = ESP;
2295 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2296 ssp = env->segs[R_SS].base;
2297 if (shift) {
2298 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2299 PUSHL(ssp, esp, esp_mask, next_eip);
2300 } else {
2301 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2302 PUSHW(ssp, esp, esp_mask, next_eip);
2305 SET_ESP(esp, esp_mask);
2306 env->eip = new_eip;
2307 env->segs[R_CS].selector = new_cs;
2308 env->segs[R_CS].base = (new_cs << 4);
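/*
 * The PUSHW/PUSHL macros used above decrement a local copy of SP, mask it
 * with the stack-width mask and store relative to the SS base; ESP itself
 * is only written back afterwards (via SET_ESP), so a fault in the middle
 * of the sequence leaves the old stack pointer intact. Byte-wise sketch
 * of a single 32-bit push (hypothetical names, not part of the build):
 */
#if 0
#include <stdint.h>

static void demo_pushl(uint8_t *ss_base, uint32_t *sp, uint32_t sp_mask,
                       uint32_t val)
{
    uint32_t off;
    int i;

    *sp -= 4;
    off = *sp & sp_mask;        /* wrap within the 16- or 32-bit stack */
    for (i = 0; i < 4; i++)
        ss_base[off + i] = (uint8_t)(val >> (8 * i)); /* little endian */
}
#endif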
2311 /* protected mode call */
2312 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2313 int shift, int next_eip_addend)
2315 int new_stack, i;
2316 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2317 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2318 uint32_t val, limit, old_sp_mask;
2319 target_ulong ssp, old_ssp, next_eip;
2321 next_eip = env->eip + next_eip_addend;
2322 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2323 LOG_PCALL_STATE(env);
2324 if ((new_cs & 0xfffc) == 0)
2325 raise_exception_err(EXCP0D_GPF, 0);
2326 if (load_segment(&e1, &e2, new_cs) != 0)
2327 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2328 cpl = env->hflags & HF_CPL_MASK;
2329 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2330 if (e2 & DESC_S_MASK) {
2331 if (!(e2 & DESC_CS_MASK))
2332 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2333 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2334 if (e2 & DESC_C_MASK) {
2335 /* conforming code segment */
2336 if (dpl > cpl)
2337 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2338 } else {
2339 /* non conforming code segment */
2340 rpl = new_cs & 3;
2341 if (rpl > cpl)
2342 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2343 if (dpl != cpl)
2344 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 if (!(e2 & DESC_P_MASK))
2347 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349 #ifdef TARGET_X86_64
2350 /* XXX: check 16/32 bit cases in long mode */
2351 if (shift == 2) {
2352 target_ulong rsp;
2353 /* 64 bit case */
2354 rsp = ESP;
2355 PUSHQ(rsp, env->segs[R_CS].selector);
2356 PUSHQ(rsp, next_eip);
2357 /* from this point, not restartable */
2358 ESP = rsp;
2359 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2360 get_seg_base(e1, e2),
2361 get_seg_limit(e1, e2), e2);
2362 EIP = new_eip;
2363 } else
2364 #endif
2366 sp = ESP;
2367 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2368 ssp = env->segs[R_SS].base;
2369 if (shift) {
2370 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2371 PUSHL(ssp, sp, sp_mask, next_eip);
2372 } else {
2373 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2374 PUSHW(ssp, sp, sp_mask, next_eip);
2377 limit = get_seg_limit(e1, e2);
2378 if (new_eip > limit)
2379 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2380 /* from this point, not restartable */
2381 SET_ESP(sp, sp_mask);
2382 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2383 get_seg_base(e1, e2), limit, e2);
2384 EIP = new_eip;
2386 } else {
2387 /* check gate type */
2388 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2389 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390 rpl = new_cs & 3;
2391 switch(type) {
2392 case 1: /* available 286 TSS */
2393 case 9: /* available 386 TSS */
2394 case 5: /* task gate */
2395 if (dpl < cpl || dpl < rpl)
2396 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2397 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2398 CC_OP = CC_OP_EFLAGS;
2399 return;
2400 case 4: /* 286 call gate */
2401 case 12: /* 386 call gate */
2402 break;
2403 default:
2404 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2405 break;
2407 shift = type >> 3;
2409 if (dpl < cpl || dpl < rpl)
2410 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2411 /* check valid bit */
2412 if (!(e2 & DESC_P_MASK))
2413 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2414 selector = e1 >> 16;
2415 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2416 param_count = e2 & 0x1f;
2417 if ((selector & 0xfffc) == 0)
2418 raise_exception_err(EXCP0D_GPF, 0);
2420 if (load_segment(&e1, &e2, selector) != 0)
2421 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2422 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2423 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2424 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2425 if (dpl > cpl)
2426 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2427 if (!(e2 & DESC_P_MASK))
2428 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2431 /* to inner privilege */
2432 get_ss_esp_from_tss(&ss, &sp, dpl);
2433 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2434 ss, sp, param_count, ESP);
2435 if ((ss & 0xfffc) == 0)
2436 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2437 if ((ss & 3) != dpl)
2438 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2439 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2442 if (ss_dpl != dpl)
2443 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2444 if (!(ss_e2 & DESC_S_MASK) ||
2445 (ss_e2 & DESC_CS_MASK) ||
2446 !(ss_e2 & DESC_W_MASK))
2447 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2448 if (!(ss_e2 & DESC_P_MASK))
2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2451 // push_size = ((param_count * 2) + 8) << shift;
2453 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2454 old_ssp = env->segs[R_SS].base;
2456 sp_mask = get_sp_mask(ss_e2);
2457 ssp = get_seg_base(ss_e1, ss_e2);
2458 if (shift) {
2459 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2460 PUSHL(ssp, sp, sp_mask, ESP);
2461 for(i = param_count - 1; i >= 0; i--) {
2462 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2463 PUSHL(ssp, sp, sp_mask, val);
2465 } else {
2466 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2467 PUSHW(ssp, sp, sp_mask, ESP);
2468 for(i = param_count - 1; i >= 0; i--) {
2469 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2470 PUSHW(ssp, sp, sp_mask, val);
2473 new_stack = 1;
2474 } else {
2475 /* to same privilege */
2476 sp = ESP;
2477 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2478 ssp = env->segs[R_SS].base;
2479 // push_size = (4 << shift);
2480 new_stack = 0;
2483 if (shift) {
2484 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2485 PUSHL(ssp, sp, sp_mask, next_eip);
2486 } else {
2487 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2488 PUSHW(ssp, sp, sp_mask, next_eip);
2491 /* from this point, not restartable */
2493 if (new_stack) {
2494 ss = (ss & ~3) | dpl;
2495 cpu_x86_load_seg_cache(env, R_SS, ss,
2496 ssp,
2497 get_seg_limit(ss_e1, ss_e2),
2498 ss_e2);
2501 selector = (selector & ~3) | dpl;
2502 cpu_x86_load_seg_cache(env, R_CS, selector,
2503 get_seg_base(e1, e2),
2504 get_seg_limit(e1, e2),
2505 e2);
2506 cpu_x86_set_cpl(env, dpl);
2507 SET_ESP(sp, sp_mask);
2508 EIP = offset;
2510 #ifdef CONFIG_KQEMU
2511 if (kqemu_is_ok(env)) {
2512 env->exception_index = -1;
2513 cpu_loop_exit();
2515 #endif
2518 /* real and vm86 mode iret */
2519 void helper_iret_real(int shift)
2521 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2522 target_ulong ssp;
2523 int eflags_mask;
2525 sp_mask = 0xffff; /* XXX: use SS segment size? */
2526 sp = ESP;
2527 ssp = env->segs[R_SS].base;
2528 if (shift == 1) {
2529 /* 32 bits */
2530 POPL(ssp, sp, sp_mask, new_eip);
2531 POPL(ssp, sp, sp_mask, new_cs);
2532 new_cs &= 0xffff;
2533 POPL(ssp, sp, sp_mask, new_eflags);
2534 } else {
2535 /* 16 bits */
2536 POPW(ssp, sp, sp_mask, new_eip);
2537 POPW(ssp, sp, sp_mask, new_cs);
2538 POPW(ssp, sp, sp_mask, new_eflags);
2540 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2541 env->segs[R_CS].selector = new_cs;
2542 env->segs[R_CS].base = (new_cs << 4);
2543 env->eip = new_eip;
2544 if (env->eflags & VM_MASK)
2545 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2546 else
2547 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2548 if (shift == 0)
2549 eflags_mask &= 0xffff;
2550 load_eflags(new_eflags, eflags_mask);
2551 env->hflags2 &= ~HF2_NMI_MASK;
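/*
 * load_eflags() merges only the bits selected by eflags_mask and leaves
 * the rest untouched, which is how the real-mode IRET above keeps vm86
 * code from raising IOPL. Minimal sketch of that merge, ignoring the
 * CC/DF bookkeeping the real function also does (not part of the build):
 */
#if 0
#include <stdint.h>

static uint32_t demo_load_eflags(uint32_t cur, uint32_t new_val,
                                 uint32_t mask)
{
    return (cur & ~mask) | (new_val & mask);
}
#endif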
2554 static inline void validate_seg(int seg_reg, int cpl)
2556 int dpl;
2557 uint32_t e2;
2559 /* XXX: on x86_64, we do not want to nullify FS and GS because
2560 they may still contain a valid base. I would be interested to
2561 know how a real x86_64 CPU behaves */
2562 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2563 (env->segs[seg_reg].selector & 0xfffc) == 0)
2564 return;
2566 e2 = env->segs[seg_reg].flags;
2567 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2568 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2569 /* data or non conforming code segment */
2570 if (dpl < cpl) {
2571 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2576 /* protected mode iret */
2577 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2579 uint32_t new_cs, new_eflags, new_ss;
2580 uint32_t new_es, new_ds, new_fs, new_gs;
2581 uint32_t e1, e2, ss_e1, ss_e2;
2582 int cpl, dpl, rpl, eflags_mask, iopl;
2583 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2585 #ifdef TARGET_X86_64
2586 if (shift == 2)
2587 sp_mask = -1;
2588 else
2589 #endif
2590 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2591 sp = ESP;
2592 ssp = env->segs[R_SS].base;
2593 new_eflags = 0; /* avoid warning */
2594 #ifdef TARGET_X86_64
2595 if (shift == 2) {
2596 POPQ(sp, new_eip);
2597 POPQ(sp, new_cs);
2598 new_cs &= 0xffff;
2599 if (is_iret) {
2600 POPQ(sp, new_eflags);
2602 } else
2603 #endif
2604 if (shift == 1) {
2605 /* 32 bits */
2606 POPL(ssp, sp, sp_mask, new_eip);
2607 POPL(ssp, sp, sp_mask, new_cs);
2608 new_cs &= 0xffff;
2609 if (is_iret) {
2610 POPL(ssp, sp, sp_mask, new_eflags);
2611 if (new_eflags & VM_MASK)
2612 goto return_to_vm86;
2614 } else {
2615 /* 16 bits */
2616 POPW(ssp, sp, sp_mask, new_eip);
2617 POPW(ssp, sp, sp_mask, new_cs);
2618 if (is_iret)
2619 POPW(ssp, sp, sp_mask, new_eflags);
2621 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2622 new_cs, new_eip, shift, addend);
2623 LOG_PCALL_STATE(env);
2624 if ((new_cs & 0xfffc) == 0)
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 if (load_segment(&e1, &e2, new_cs) != 0)
2627 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2628 if (!(e2 & DESC_S_MASK) ||
2629 !(e2 & DESC_CS_MASK))
2630 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2631 cpl = env->hflags & HF_CPL_MASK;
2632 rpl = new_cs & 3;
2633 if (rpl < cpl)
2634 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2635 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2636 if (e2 & DESC_C_MASK) {
2637 if (dpl > rpl)
2638 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2639 } else {
2640 if (dpl != rpl)
2641 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2643 if (!(e2 & DESC_P_MASK))
2644 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2646 sp += addend;
2647 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2648 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2649 /* return to same privilege level */
2650 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2651 get_seg_base(e1, e2),
2652 get_seg_limit(e1, e2),
2653 e2);
2654 } else {
2655 /* return to different privilege level */
2656 #ifdef TARGET_X86_64
2657 if (shift == 2) {
2658 POPQ(sp, new_esp);
2659 POPQ(sp, new_ss);
2660 new_ss &= 0xffff;
2661 } else
2662 #endif
2663 if (shift == 1) {
2664 /* 32 bits */
2665 POPL(ssp, sp, sp_mask, new_esp);
2666 POPL(ssp, sp, sp_mask, new_ss);
2667 new_ss &= 0xffff;
2668 } else {
2669 /* 16 bits */
2670 POPW(ssp, sp, sp_mask, new_esp);
2671 POPW(ssp, sp, sp_mask, new_ss);
2673 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2674 new_ss, new_esp);
2675 if ((new_ss & 0xfffc) == 0) {
2676 #ifdef TARGET_X86_64
2677 /* NULL ss is allowed in long mode if cpl != 3 */
2678 /* XXX: test CS64 ? */
2679 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2680 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2681 0, 0xffffffff,
2682 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2683 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2684 DESC_W_MASK | DESC_A_MASK);
2685 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2686 } else
2687 #endif
2689 raise_exception_err(EXCP0D_GPF, 0);
2691 } else {
2692 if ((new_ss & 3) != rpl)
2693 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2694 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2695 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2696 if (!(ss_e2 & DESC_S_MASK) ||
2697 (ss_e2 & DESC_CS_MASK) ||
2698 !(ss_e2 & DESC_W_MASK))
2699 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2700 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2701 if (dpl != rpl)
2702 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2703 if (!(ss_e2 & DESC_P_MASK))
2704 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2705 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2706 get_seg_base(ss_e1, ss_e2),
2707 get_seg_limit(ss_e1, ss_e2),
2708 ss_e2);
2711 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2712 get_seg_base(e1, e2),
2713 get_seg_limit(e1, e2),
2714 e2);
2715 cpu_x86_set_cpl(env, rpl);
2716 sp = new_esp;
2717 #ifdef TARGET_X86_64
2718 if (env->hflags & HF_CS64_MASK)
2719 sp_mask = -1;
2720 else
2721 #endif
2722 sp_mask = get_sp_mask(ss_e2);
2724 /* validate data segments */
2725 validate_seg(R_ES, rpl);
2726 validate_seg(R_DS, rpl);
2727 validate_seg(R_FS, rpl);
2728 validate_seg(R_GS, rpl);
2730 sp += addend;
2732 SET_ESP(sp, sp_mask);
2733 env->eip = new_eip;
2734 if (is_iret) {
2735 /* NOTE: 'cpl' is the _old_ CPL */
2736 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2737 if (cpl == 0)
2738 eflags_mask |= IOPL_MASK;
2739 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2740 if (cpl <= iopl)
2741 eflags_mask |= IF_MASK;
2742 if (shift == 0)
2743 eflags_mask &= 0xffff;
2744 load_eflags(new_eflags, eflags_mask);
2746 return;
2748 return_to_vm86:
2749 POPL(ssp, sp, sp_mask, new_esp);
2750 POPL(ssp, sp, sp_mask, new_ss);
2751 POPL(ssp, sp, sp_mask, new_es);
2752 POPL(ssp, sp, sp_mask, new_ds);
2753 POPL(ssp, sp, sp_mask, new_fs);
2754 POPL(ssp, sp, sp_mask, new_gs);
2756 /* modify processor state */
2757 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2758 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2759 load_seg_vm(R_CS, new_cs & 0xffff);
2760 cpu_x86_set_cpl(env, 3);
2761 load_seg_vm(R_SS, new_ss & 0xffff);
2762 load_seg_vm(R_ES, new_es & 0xffff);
2763 load_seg_vm(R_DS, new_ds & 0xffff);
2764 load_seg_vm(R_FS, new_fs & 0xffff);
2765 load_seg_vm(R_GS, new_gs & 0xffff);
2767 env->eip = new_eip & 0xffff;
2768 ESP = new_esp;
2771 void helper_iret_protected(int shift, int next_eip)
2773 int tss_selector, type;
2774 uint32_t e1, e2;
2776 /* specific case for TSS */
2777 if (env->eflags & NT_MASK) {
2778 #ifdef TARGET_X86_64
2779 if (env->hflags & HF_LMA_MASK)
2780 raise_exception_err(EXCP0D_GPF, 0);
2781 #endif
2782 tss_selector = lduw_kernel(env->tr.base + 0);
2783 if (tss_selector & 4)
2784 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2785 if (load_segment(&e1, &e2, tss_selector) != 0)
2786 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2787 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2788 /* NOTE: we check both segment and busy TSS */
2789 if (type != 3)
2790 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2791 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2792 } else {
2793 helper_ret_protected(shift, 1, 0);
2795 env->hflags2 &= ~HF2_NMI_MASK;
2796 #ifdef CONFIG_KQEMU
2797 if (kqemu_is_ok(env)) {
2798 CC_OP = CC_OP_EFLAGS;
2799 env->exception_index = -1;
2800 cpu_loop_exit();
2802 #endif
2805 void helper_lret_protected(int shift, int addend)
2807 helper_ret_protected(shift, 0, addend);
2808 #ifdef CONFIG_KQEMU
2809 if (kqemu_is_ok(env)) {
2810 env->exception_index = -1;
2811 cpu_loop_exit();
2813 #endif
2816 void helper_sysenter(void)
2818 if (env->sysenter_cs == 0) {
2819 raise_exception_err(EXCP0D_GPF, 0);
2821 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2822 cpu_x86_set_cpl(env, 0);
2824 #ifdef TARGET_X86_64
2825 if (env->hflags & HF_LMA_MASK) {
2826 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2827 0, 0xffffffff,
2828 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2829 DESC_S_MASK |
2830 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2831 } else
2832 #endif
2834 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2835 0, 0xffffffff,
2836 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2837 DESC_S_MASK |
2838 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2840 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2841 0, 0xffffffff,
2842 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2843 DESC_S_MASK |
2844 DESC_W_MASK | DESC_A_MASK);
2845 ESP = env->sysenter_esp;
2846 EIP = env->sysenter_eip;
2849 void helper_sysexit(int dflag)
2851 int cpl;
2853 cpl = env->hflags & HF_CPL_MASK;
2854 if (env->sysenter_cs == 0 || cpl != 0) {
2855 raise_exception_err(EXCP0D_GPF, 0);
2857 cpu_x86_set_cpl(env, 3);
2858 #ifdef TARGET_X86_64
2859 if (dflag == 2) {
2860 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2861 0, 0xffffffff,
2862 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2863 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2864 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2865 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2866 0, 0xffffffff,
2867 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2868 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2869 DESC_W_MASK | DESC_A_MASK);
2870 } else
2871 #endif
2873 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2874 0, 0xffffffff,
2875 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2876 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2877 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2878 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2879 0, 0xffffffff,
2880 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2881 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2882 DESC_W_MASK | DESC_A_MASK);
2884 ESP = ECX;
2885 EIP = EDX;
2886 #ifdef CONFIG_KQEMU
2887 if (kqemu_is_ok(env)) {
2888 env->exception_index = -1;
2889 cpu_loop_exit();
2891 #endif
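/*
 * SYSENTER/SYSEXIT derive every selector from MSR_IA32_SYSENTER_CS:
 * kernel CS/SS at +0/+8, legacy user CS/SS at +16/+24 with RPL forced to
 * 3, and the 64-bit path above uses +32/+40 instead. Sketch of the
 * user-mode derivation (hypothetical names, not part of the build):
 */
#if 0
#include <stdint.h>

static void demo_sysexit_selectors(uint16_t sysenter_cs,
                                   uint16_t *user_cs, uint16_t *user_ss)
{
    *user_cs = ((sysenter_cs + 16) & 0xfffc) | 3;   /* user code, RPL 3 */
    *user_ss = ((sysenter_cs + 24) & 0xfffc) | 3;   /* user stack, RPL 3 */
}
#endif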
2894 #if defined(CONFIG_USER_ONLY)
2895 target_ulong helper_read_crN(int reg)
2897 return 0;
2900 void helper_write_crN(int reg, target_ulong t0)
2904 void helper_movl_drN_T0(int reg, target_ulong t0)
2907 #else
2908 target_ulong helper_read_crN(int reg)
2910 target_ulong val;
2912 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2913 switch(reg) {
2914 default:
2915 val = env->cr[reg];
2916 break;
2917 case 8:
2918 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2919 val = cpu_get_apic_tpr(env);
2920 } else {
2921 val = env->v_tpr;
2923 break;
2925 return val;
2928 void helper_write_crN(int reg, target_ulong t0)
2930 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2931 switch(reg) {
2932 case 0:
2933 cpu_x86_update_cr0(env, t0);
2934 break;
2935 case 3:
2936 cpu_x86_update_cr3(env, t0);
2937 break;
2938 case 4:
2939 cpu_x86_update_cr4(env, t0);
2940 break;
2941 case 8:
2942 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2943 cpu_set_apic_tpr(env, t0);
2945 env->v_tpr = t0 & 0x0f;
2946 break;
2947 default:
2948 env->cr[reg] = t0;
2949 break;
2953 void helper_movl_drN_T0(int reg, target_ulong t0)
2955 int i;
2957 if (reg < 4) {
2958 hw_breakpoint_remove(env, reg);
2959 env->dr[reg] = t0;
2960 hw_breakpoint_insert(env, reg);
2961 } else if (reg == 7) {
2962 for (i = 0; i < 4; i++)
2963 hw_breakpoint_remove(env, i);
2964 env->dr[7] = t0;
2965 for (i = 0; i < 4; i++)
2966 hw_breakpoint_insert(env, i);
2967 } else
2968 env->dr[reg] = t0;
2970 #endif
2972 void helper_lmsw(target_ulong t0)
2974 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2975 if already set to one. */
2976 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2977 helper_write_crN(0, t0);
2980 void helper_clts(void)
2982 env->cr[0] &= ~CR0_TS_MASK;
2983 env->hflags &= ~HF_TS_MASK;
2986 void helper_invlpg(target_ulong addr)
2988 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2989 tlb_flush_page(env, addr);
2992 void helper_rdtsc(void)
2994 uint64_t val;
2996 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2997 raise_exception(EXCP0D_GPF);
2999 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3001 val = cpu_get_tsc(env) + env->tsc_offset;
3002 EAX = (uint32_t)(val);
3003 EDX = (uint32_t)(val >> 32);
3006 void helper_rdpmc(void)
3008 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3009 raise_exception(EXCP0D_GPF);
3011 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3013 /* currently unimplemented */
3014 raise_exception_err(EXCP06_ILLOP, 0);
3017 #if defined(CONFIG_USER_ONLY)
3018 void helper_wrmsr(void)
3022 void helper_rdmsr(void)
3025 #else
3026 void helper_wrmsr(void)
3028 uint64_t val;
3030 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3032 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3034 switch((uint32_t)ECX) {
3035 case MSR_IA32_SYSENTER_CS:
3036 env->sysenter_cs = val & 0xffff;
3037 break;
3038 case MSR_IA32_SYSENTER_ESP:
3039 env->sysenter_esp = val;
3040 break;
3041 case MSR_IA32_SYSENTER_EIP:
3042 env->sysenter_eip = val;
3043 break;
3044 case MSR_IA32_APICBASE:
3045 cpu_set_apic_base(env, val);
3046 break;
3047 case MSR_EFER:
3049 uint64_t update_mask;
3050 update_mask = 0;
3051 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3052 update_mask |= MSR_EFER_SCE;
3053 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3054 update_mask |= MSR_EFER_LME;
3055 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3056 update_mask |= MSR_EFER_FFXSR;
3057 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3058 update_mask |= MSR_EFER_NXE;
3059 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3060 update_mask |= MSR_EFER_SVME;
3063 cpu_load_efer(env, (env->efer & ~update_mask) |
3064 (val & update_mask));
3066 break;
3067 case MSR_STAR:
3068 env->star = val;
3069 break;
3070 case MSR_PAT:
3071 env->pat = val;
3072 break;
3073 case MSR_VM_HSAVE_PA:
3074 env->vm_hsave = val;
3075 break;
3076 #ifdef TARGET_X86_64
3077 case MSR_LSTAR:
3078 env->lstar = val;
3079 break;
3080 case MSR_CSTAR:
3081 env->cstar = val;
3082 break;
3083 case MSR_FMASK:
3084 env->fmask = val;
3085 break;
3086 case MSR_FSBASE:
3087 env->segs[R_FS].base = val;
3088 break;
3089 case MSR_GSBASE:
3090 env->segs[R_GS].base = val;
3091 break;
3092 case MSR_KERNELGSBASE:
3093 env->kernelgsbase = val;
3094 break;
3095 #endif
3096 case MSR_MTRRphysBase(0):
3097 case MSR_MTRRphysBase(1):
3098 case MSR_MTRRphysBase(2):
3099 case MSR_MTRRphysBase(3):
3100 case MSR_MTRRphysBase(4):
3101 case MSR_MTRRphysBase(5):
3102 case MSR_MTRRphysBase(6):
3103 case MSR_MTRRphysBase(7):
3104 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3105 break;
3106 case MSR_MTRRphysMask(0):
3107 case MSR_MTRRphysMask(1):
3108 case MSR_MTRRphysMask(2):
3109 case MSR_MTRRphysMask(3):
3110 case MSR_MTRRphysMask(4):
3111 case MSR_MTRRphysMask(5):
3112 case MSR_MTRRphysMask(6):
3113 case MSR_MTRRphysMask(7):
3114 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3115 break;
3116 case MSR_MTRRfix64K_00000:
3117 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3118 break;
3119 case MSR_MTRRfix16K_80000:
3120 case MSR_MTRRfix16K_A0000:
3121 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3122 break;
3123 case MSR_MTRRfix4K_C0000:
3124 case MSR_MTRRfix4K_C8000:
3125 case MSR_MTRRfix4K_D0000:
3126 case MSR_MTRRfix4K_D8000:
3127 case MSR_MTRRfix4K_E0000:
3128 case MSR_MTRRfix4K_E8000:
3129 case MSR_MTRRfix4K_F0000:
3130 case MSR_MTRRfix4K_F8000:
3131 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3132 break;
3133 case MSR_MTRRdefType:
3134 env->mtrr_deftype = val;
3135 break;
3136 default:
3137 /* XXX: exception ? */
3138 break;
3142 void helper_rdmsr(void)
3144 uint64_t val;
3146 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3148 switch((uint32_t)ECX) {
3149 case MSR_IA32_SYSENTER_CS:
3150 val = env->sysenter_cs;
3151 break;
3152 case MSR_IA32_SYSENTER_ESP:
3153 val = env->sysenter_esp;
3154 break;
3155 case MSR_IA32_SYSENTER_EIP:
3156 val = env->sysenter_eip;
3157 break;
3158 case MSR_IA32_APICBASE:
3159 val = cpu_get_apic_base(env);
3160 break;
3161 case MSR_EFER:
3162 val = env->efer;
3163 break;
3164 case MSR_STAR:
3165 val = env->star;
3166 break;
3167 case MSR_PAT:
3168 val = env->pat;
3169 break;
3170 case MSR_VM_HSAVE_PA:
3171 val = env->vm_hsave;
3172 break;
3173 case MSR_IA32_PERF_STATUS:
3174 /* tsc_increment_by_tick */
3175 val = 1000ULL;
3176 /* CPU multiplier */
3177 val |= (((uint64_t)4ULL) << 40);
3178 break;
3179 #ifdef TARGET_X86_64
3180 case MSR_LSTAR:
3181 val = env->lstar;
3182 break;
3183 case MSR_CSTAR:
3184 val = env->cstar;
3185 break;
3186 case MSR_FMASK:
3187 val = env->fmask;
3188 break;
3189 case MSR_FSBASE:
3190 val = env->segs[R_FS].base;
3191 break;
3192 case MSR_GSBASE:
3193 val = env->segs[R_GS].base;
3194 break;
3195 case MSR_KERNELGSBASE:
3196 val = env->kernelgsbase;
3197 break;
3198 #endif
3199 #ifdef CONFIG_KQEMU
3200 case MSR_QPI_COMMBASE:
3201 if (env->kqemu_enabled) {
3202 val = kqemu_comm_base;
3203 } else {
3204 val = 0;
3206 break;
3207 #endif
3208 case MSR_MTRRphysBase(0):
3209 case MSR_MTRRphysBase(1):
3210 case MSR_MTRRphysBase(2):
3211 case MSR_MTRRphysBase(3):
3212 case MSR_MTRRphysBase(4):
3213 case MSR_MTRRphysBase(5):
3214 case MSR_MTRRphysBase(6):
3215 case MSR_MTRRphysBase(7):
3216 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3217 break;
3218 case MSR_MTRRphysMask(0):
3219 case MSR_MTRRphysMask(1):
3220 case MSR_MTRRphysMask(2):
3221 case MSR_MTRRphysMask(3):
3222 case MSR_MTRRphysMask(4):
3223 case MSR_MTRRphysMask(5):
3224 case MSR_MTRRphysMask(6):
3225 case MSR_MTRRphysMask(7):
3226 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3227 break;
3228 case MSR_MTRRfix64K_00000:
3229 val = env->mtrr_fixed[0];
3230 break;
3231 case MSR_MTRRfix16K_80000:
3232 case MSR_MTRRfix16K_A0000:
3233 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3234 break;
3235 case MSR_MTRRfix4K_C0000:
3236 case MSR_MTRRfix4K_C8000:
3237 case MSR_MTRRfix4K_D0000:
3238 case MSR_MTRRfix4K_D8000:
3239 case MSR_MTRRfix4K_E0000:
3240 case MSR_MTRRfix4K_E8000:
3241 case MSR_MTRRfix4K_F0000:
3242 case MSR_MTRRfix4K_F8000:
3243 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3244 break;
3245 case MSR_MTRRdefType:
3246 val = env->mtrr_deftype;
3247 break;
3248 case MSR_MTRRcap:
3249 if (env->cpuid_features & CPUID_MTRR)
3250 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3251 else
3252 /* XXX: exception ? */
3253 val = 0;
3254 break;
3255 default:
3256 /* XXX: exception ? */
3257 val = 0;
3258 break;
3260 EAX = (uint32_t)(val);
3261 EDX = (uint32_t)(val >> 32);
3263 #endif
3265 target_ulong helper_lsl(target_ulong selector1)
3267 unsigned int limit;
3268 uint32_t e1, e2, eflags, selector;
3269 int rpl, dpl, cpl, type;
3271 selector = selector1 & 0xffff;
3272 eflags = helper_cc_compute_all(CC_OP);
3273 if ((selector & 0xfffc) == 0)
3274 goto fail;
3275 if (load_segment(&e1, &e2, selector) != 0)
3276 goto fail;
3277 rpl = selector & 3;
3278 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3279 cpl = env->hflags & HF_CPL_MASK;
3280 if (e2 & DESC_S_MASK) {
3281 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3282 /* conforming */
3283 } else {
3284 if (dpl < cpl || dpl < rpl)
3285 goto fail;
3287 } else {
3288 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3289 switch(type) {
3290 case 1:
3291 case 2:
3292 case 3:
3293 case 9:
3294 case 11:
3295 break;
3296 default:
3297 goto fail;
3299 if (dpl < cpl || dpl < rpl) {
3300 fail:
3301 CC_SRC = eflags & ~CC_Z;
3302 return 0;
3305 limit = get_seg_limit(e1, e2);
3306 CC_SRC = eflags | CC_Z;
3307 return limit;
3310 target_ulong helper_lar(target_ulong selector1)
3312 uint32_t e1, e2, eflags, selector;
3313 int rpl, dpl, cpl, type;
3315 selector = selector1 & 0xffff;
3316 eflags = helper_cc_compute_all(CC_OP);
3317 if ((selector & 0xfffc) == 0)
3318 goto fail;
3319 if (load_segment(&e1, &e2, selector) != 0)
3320 goto fail;
3321 rpl = selector & 3;
3322 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3323 cpl = env->hflags & HF_CPL_MASK;
3324 if (e2 & DESC_S_MASK) {
3325 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3326 /* conforming */
3327 } else {
3328 if (dpl < cpl || dpl < rpl)
3329 goto fail;
3331 } else {
3332 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3333 switch(type) {
3334 case 1:
3335 case 2:
3336 case 3:
3337 case 4:
3338 case 5:
3339 case 9:
3340 case 11:
3341 case 12:
3342 break;
3343 default:
3344 goto fail;
3346 if (dpl < cpl || dpl < rpl) {
3347 fail:
3348 CC_SRC = eflags & ~CC_Z;
3349 return 0;
3352 CC_SRC = eflags | CC_Z;
3353 return e2 & 0x00f0ff00;
3356 void helper_verr(target_ulong selector1)
3358 uint32_t e1, e2, eflags, selector;
3359 int rpl, dpl, cpl;
3361 selector = selector1 & 0xffff;
3362 eflags = helper_cc_compute_all(CC_OP);
3363 if ((selector & 0xfffc) == 0)
3364 goto fail;
3365 if (load_segment(&e1, &e2, selector) != 0)
3366 goto fail;
3367 if (!(e2 & DESC_S_MASK))
3368 goto fail;
3369 rpl = selector & 3;
3370 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3371 cpl = env->hflags & HF_CPL_MASK;
3372 if (e2 & DESC_CS_MASK) {
3373 if (!(e2 & DESC_R_MASK))
3374 goto fail;
3375 if (!(e2 & DESC_C_MASK)) {
3376 if (dpl < cpl || dpl < rpl)
3377 goto fail;
3379 } else {
3380 if (dpl < cpl || dpl < rpl) {
3381 fail:
3382 CC_SRC = eflags & ~CC_Z;
3383 return;
3386 CC_SRC = eflags | CC_Z;
3389 void helper_verw(target_ulong selector1)
3391 uint32_t e1, e2, eflags, selector;
3392 int rpl, dpl, cpl;
3394 selector = selector1 & 0xffff;
3395 eflags = helper_cc_compute_all(CC_OP);
3396 if ((selector & 0xfffc) == 0)
3397 goto fail;
3398 if (load_segment(&e1, &e2, selector) != 0)
3399 goto fail;
3400 if (!(e2 & DESC_S_MASK))
3401 goto fail;
3402 rpl = selector & 3;
3403 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3404 cpl = env->hflags & HF_CPL_MASK;
3405 if (e2 & DESC_CS_MASK) {
3406 goto fail;
3407 } else {
3408 if (dpl < cpl || dpl < rpl)
3409 goto fail;
3410 if (!(e2 & DESC_W_MASK)) {
3411 fail:
3412 CC_SRC = eflags & ~CC_Z;
3413 return;
3416 CC_SRC = eflags | CC_Z;
3419 /* x87 FPU helpers */
3421 static void fpu_set_exception(int mask)
3423 env->fpus |= mask;
3424 if (env->fpus & (~env->fpuc & FPUC_EM))
3425 env->fpus |= FPUS_SE | FPUS_B;
3428 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3430 if (b == 0.0)
3431 fpu_set_exception(FPUS_ZE);
3432 return a / b;
3435 static void fpu_raise_exception(void)
3437 if (env->cr[0] & CR0_NE_MASK) {
3438 raise_exception(EXCP10_COPR);
3440 #if !defined(CONFIG_USER_ONLY)
3441 else {
3442 cpu_set_ferr(env);
3444 #endif
3447 void helper_flds_FT0(uint32_t val)
3449 union {
3450 float32 f;
3451 uint32_t i;
3452 } u;
3453 u.i = val;
3454 FT0 = float32_to_floatx(u.f, &env->fp_status);
3457 void helper_fldl_FT0(uint64_t val)
3459 union {
3460 float64 f;
3461 uint64_t i;
3462 } u;
3463 u.i = val;
3464 FT0 = float64_to_floatx(u.f, &env->fp_status);
3467 void helper_fildl_FT0(int32_t val)
3469 FT0 = int32_to_floatx(val, &env->fp_status);
3472 void helper_flds_ST0(uint32_t val)
3474 int new_fpstt;
3475 union {
3476 float32 f;
3477 uint32_t i;
3478 } u;
3479 new_fpstt = (env->fpstt - 1) & 7;
3480 u.i = val;
3481 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3482 env->fpstt = new_fpstt;
3483 env->fptags[new_fpstt] = 0; /* validate stack entry */
3486 void helper_fldl_ST0(uint64_t val)
3488 int new_fpstt;
3489 union {
3490 float64 f;
3491 uint64_t i;
3492 } u;
3493 new_fpstt = (env->fpstt - 1) & 7;
3494 u.i = val;
3495 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3496 env->fpstt = new_fpstt;
3497 env->fptags[new_fpstt] = 0; /* validate stack entry */
3500 void helper_fildl_ST0(int32_t val)
3502 int new_fpstt;
3503 new_fpstt = (env->fpstt - 1) & 7;
3504 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3505 env->fpstt = new_fpstt;
3506 env->fptags[new_fpstt] = 0; /* validate stack entry */
3509 void helper_fildll_ST0(int64_t val)
3511 int new_fpstt;
3512 new_fpstt = (env->fpstt - 1) & 7;
3513 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3514 env->fpstt = new_fpstt;
3515 env->fptags[new_fpstt] = 0; /* validate stack entry */
3518 uint32_t helper_fsts_ST0(void)
3520 union {
3521 float32 f;
3522 uint32_t i;
3523 } u;
3524 u.f = floatx_to_float32(ST0, &env->fp_status);
3525 return u.i;
3528 uint64_t helper_fstl_ST0(void)
3530 union {
3531 float64 f;
3532 uint64_t i;
3533 } u;
3534 u.f = floatx_to_float64(ST0, &env->fp_status);
3535 return u.i;
3538 int32_t helper_fist_ST0(void)
3540 int32_t val;
3541 val = floatx_to_int32(ST0, &env->fp_status);
3542 if (val != (int16_t)val)
3543 val = -32768; /* out of 16-bit range: return the integer indefinite */
3544 return val;
3547 int32_t helper_fistl_ST0(void)
3549 int32_t val;
3550 val = floatx_to_int32(ST0, &env->fp_status);
3551 return val;
3554 int64_t helper_fistll_ST0(void)
3556 int64_t val;
3557 val = floatx_to_int64(ST0, &env->fp_status);
3558 return val;
3561 int32_t helper_fistt_ST0(void)
3563 int32_t val;
3564 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3565 if (val != (int16_t)val)
3566 val = -32768; /* out of 16-bit range: return the integer indefinite */
3567 return val;
3570 int32_t helper_fisttl_ST0(void)
3572 int32_t val;
3573 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3574 return val;
3577 int64_t helper_fisttll_ST0(void)
3579 int64_t val;
3580 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3581 return val;
3584 void helper_fldt_ST0(target_ulong ptr)
3586 int new_fpstt;
3587 new_fpstt = (env->fpstt - 1) & 7;
3588 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3589 env->fpstt = new_fpstt;
3590 env->fptags[new_fpstt] = 0; /* validate stack entry */
3593 void helper_fstt_ST0(target_ulong ptr)
3595 helper_fstt(ST0, ptr);
3598 void helper_fpush(void)
3600 fpush();
3603 void helper_fpop(void)
3605 fpop();
3608 void helper_fdecstp(void)
3610 env->fpstt = (env->fpstt - 1) & 7;
3611 env->fpus &= (~0x4700);
3614 void helper_fincstp(void)
3616 env->fpstt = (env->fpstt + 1) & 7;
3617 env->fpus &= (~0x4700);
3620 /* FPU move */
3622 void helper_ffree_STN(int st_index)
3624 env->fptags[(env->fpstt + st_index) & 7] = 1;
3627 void helper_fmov_ST0_FT0(void)
3629 ST0 = FT0;
3632 void helper_fmov_FT0_STN(int st_index)
3634 FT0 = ST(st_index);
3637 void helper_fmov_ST0_STN(int st_index)
3639 ST0 = ST(st_index);
3642 void helper_fmov_STN_ST0(int st_index)
3644 ST(st_index) = ST0;
3647 void helper_fxchg_ST0_STN(int st_index)
3649 CPU86_LDouble tmp;
3650 tmp = ST(st_index);
3651 ST(st_index) = ST0;
3652 ST0 = tmp;
3655 /* FPU operations */
3657 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3659 void helper_fcom_ST0_FT0(void)
3661 int ret;
3663 ret = floatx_compare(ST0, FT0, &env->fp_status);
3664 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3667 void helper_fucom_ST0_FT0(void)
3669 int ret;
3671 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3672 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3675 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3677 void helper_fcomi_ST0_FT0(void)
3679 int eflags;
3680 int ret;
3682 ret = floatx_compare(ST0, FT0, &env->fp_status);
3683 eflags = helper_cc_compute_all(CC_OP);
3684 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3685 CC_SRC = eflags;
3688 void helper_fucomi_ST0_FT0(void)
3690 int eflags;
3691 int ret;
3693 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3694 eflags = helper_cc_compute_all(CC_OP);
3695 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3696 CC_SRC = eflags;
3699 void helper_fadd_ST0_FT0(void)
3701 ST0 += FT0;
3704 void helper_fmul_ST0_FT0(void)
3706 ST0 *= FT0;
3709 void helper_fsub_ST0_FT0(void)
3711 ST0 -= FT0;
3714 void helper_fsubr_ST0_FT0(void)
3716 ST0 = FT0 - ST0;
3719 void helper_fdiv_ST0_FT0(void)
3721 ST0 = helper_fdiv(ST0, FT0);
3724 void helper_fdivr_ST0_FT0(void)
3726 ST0 = helper_fdiv(FT0, ST0);
3729 /* fp operations between STN and ST0 */
3731 void helper_fadd_STN_ST0(int st_index)
3733 ST(st_index) += ST0;
3736 void helper_fmul_STN_ST0(int st_index)
3738 ST(st_index) *= ST0;
3741 void helper_fsub_STN_ST0(int st_index)
3743 ST(st_index) -= ST0;
3746 void helper_fsubr_STN_ST0(int st_index)
3748 CPU86_LDouble *p;
3749 p = &ST(st_index);
3750 *p = ST0 - *p;
3753 void helper_fdiv_STN_ST0(int st_index)
3755 CPU86_LDouble *p;
3756 p = &ST(st_index);
3757 *p = helper_fdiv(*p, ST0);
3760 void helper_fdivr_STN_ST0(int st_index)
3762 CPU86_LDouble *p;
3763 p = &ST(st_index);
3764 *p = helper_fdiv(ST0, *p);
3767 /* misc FPU operations */
3768 void helper_fchs_ST0(void)
3770 ST0 = floatx_chs(ST0);
3773 void helper_fabs_ST0(void)
3775 ST0 = floatx_abs(ST0);
3778 void helper_fld1_ST0(void)
3780 ST0 = f15rk[1];
3783 void helper_fldl2t_ST0(void)
3785 ST0 = f15rk[6];
3788 void helper_fldl2e_ST0(void)
3790 ST0 = f15rk[5];
3793 void helper_fldpi_ST0(void)
3795 ST0 = f15rk[2];
3798 void helper_fldlg2_ST0(void)
3800 ST0 = f15rk[3];
3803 void helper_fldln2_ST0(void)
3805 ST0 = f15rk[4];
3808 void helper_fldz_ST0(void)
3810 ST0 = f15rk[0];
3813 void helper_fldz_FT0(void)
3815 FT0 = f15rk[0];
3818 uint32_t helper_fnstsw(void)
3820 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; /* TOP in bits 11..13 */
3823 uint32_t helper_fnstcw(void)
3825 return env->fpuc;
3828 static void update_fp_status(void)
3830 int rnd_type;
3832 /* set rounding mode */
3833 switch(env->fpuc & RC_MASK) {
3834 default:
3835 case RC_NEAR:
3836 rnd_type = float_round_nearest_even;
3837 break;
3838 case RC_DOWN:
3839 rnd_type = float_round_down;
3840 break;
3841 case RC_UP:
3842 rnd_type = float_round_up;
3843 break;
3844 case RC_CHOP:
3845 rnd_type = float_round_to_zero;
3846 break;
3848 set_float_rounding_mode(rnd_type, &env->fp_status);
3849 #ifdef FLOATX80
3850 switch((env->fpuc >> 8) & 3) {
3851 case 0:
3852 rnd_type = 32;
3853 break;
3854 case 2:
3855 rnd_type = 64;
3856 break;
3857 case 3:
3858 default:
3859 rnd_type = 80;
3860 break;
3862 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3863 #endif
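/*
 * The RC field decoded above maps one-for-one onto the C99 <fenv.h>
 * rounding modes, which is a handy way to sanity-check it on a host.
 * Assumes RC lives in bits 10..11 of the control word; illustrative only,
 * not part of the build.
 */
#if 0
#include <fenv.h>
#include <stdint.h>

static int demo_rc_to_fenv(uint16_t fpuc)
{
    switch ((fpuc >> 10) & 3) {
    case 0:  return FE_TONEAREST;   /* RC_NEAR */
    case 1:  return FE_DOWNWARD;    /* RC_DOWN */
    case 2:  return FE_UPWARD;      /* RC_UP */
    default: return FE_TOWARDZERO;  /* RC_CHOP */
    }
}
#endif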
3866 void helper_fldcw(uint32_t val)
3868 env->fpuc = val;
3869 update_fp_status();
3872 void helper_fclex(void)
3874 env->fpus &= 0x7f00;
3877 void helper_fwait(void)
3879 if (env->fpus & FPUS_SE)
3880 fpu_raise_exception();
3883 void helper_fninit(void)
3885 env->fpus = 0;
3886 env->fpstt = 0;
3887 env->fpuc = 0x37f;
3888 env->fptags[0] = 1;
3889 env->fptags[1] = 1;
3890 env->fptags[2] = 1;
3891 env->fptags[3] = 1;
3892 env->fptags[4] = 1;
3893 env->fptags[5] = 1;
3894 env->fptags[6] = 1;
3895 env->fptags[7] = 1;
3898 /* BCD ops */
3900 void helper_fbld_ST0(target_ulong ptr)
3902 CPU86_LDouble tmp;
3903 uint64_t val;
3904 unsigned int v;
3905 int i;
3907 val = 0;
3908 for(i = 8; i >= 0; i--) {
3909 v = ldub(ptr + i);
3910 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3912 tmp = val;
3913 if (ldub(ptr + 9) & 0x80)
3914 tmp = -tmp;
3915 fpush();
3916 ST0 = tmp;
3919 void helper_fbst_ST0(target_ulong ptr)
3921 int v;
3922 target_ulong mem_ref, mem_end;
3923 int64_t val;
3925 val = floatx_to_int64(ST0, &env->fp_status);
3926 mem_ref = ptr;
3927 mem_end = mem_ref + 9;
3928 if (val < 0) {
3929 stb(mem_end, 0x80);
3930 val = -val;
3931 } else {
3932 stb(mem_end, 0x00);
3934 while (mem_ref < mem_end) {
3935 if (val == 0)
3936 break;
3937 v = val % 100;
3938 val = val / 100;
3939 v = ((v / 10) << 4) | (v % 10);
3940 stb(mem_ref++, v);
3942 while (mem_ref < mem_end) {
3943 stb(mem_ref++, 0);
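/*
 * FBLD/FBST use a 10-byte packed BCD format: bytes 0..8 hold 18 decimal
 * digits, two per byte with the low digit in the low nibble, and byte 9
 * carries the sign in bit 7. Standalone round trip mirroring the loops
 * above (assumes the value fits in 18 digits; not part of the build):
 */
#if 0
#include <stdint.h>

static void demo_int_to_bcd(int64_t val, uint8_t out[10])
{
    int i;

    out[9] = val < 0 ? 0x80 : 0x00;
    if (val < 0)
        val = -val;
    for (i = 0; i < 9; i++) {
        int v = val % 100;
        val /= 100;
        out[i] = (uint8_t)(((v / 10) << 4) | (v % 10));
    }
}

static int64_t demo_bcd_to_int(const uint8_t in[10])
{
    int64_t val = 0;
    int i;

    for (i = 8; i >= 0; i--)
        val = (val * 100) + ((in[i] >> 4) * 10) + (in[i] & 0xf);
    return (in[9] & 0x80) ? -val : val;
}
#endif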
3947 void helper_f2xm1(void)
3949 ST0 = pow(2.0, ST0) - 1.0;
3952 void helper_fyl2x(void)
3954 CPU86_LDouble fptemp;
3956 fptemp = ST0;
3957 if (fptemp > 0.0) {
3958 fptemp = log(fptemp) / log(2.0); /* log2(ST) */
3959 ST1 *= fptemp;
3960 fpop();
3961 } else {
3962 env->fpus &= (~0x4700);
3963 env->fpus |= 0x400;
3967 void helper_fptan(void)
3969 CPU86_LDouble fptemp;
3971 fptemp = ST0;
3972 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
3973 env->fpus |= 0x400;
3974 } else {
3975 ST0 = tan(fptemp);
3976 fpush();
3977 ST0 = 1.0;
3978 env->fpus &= (~0x400); /* C2 <-- 0 */
3979 /* the above code is for |arg| < 2**52 only */
3983 void helper_fpatan(void)
3985 CPU86_LDouble fptemp, fpsrcop;
3987 fpsrcop = ST1;
3988 fptemp = ST0;
3989 ST1 = atan2(fpsrcop, fptemp);
3990 fpop();
3993 void helper_fxtract(void)
3995 CPU86_LDoubleU temp;
3996 unsigned int expdif;
3998 temp.d = ST0;
3999 expdif = EXPD(temp) - EXPBIAS;
4000 /* DP exponent bias */
4001 ST0 = expdif;
4002 fpush();
4003 BIASEXPONENT(temp);
4004 ST0 = temp.d;
4007 void helper_fprem1(void)
4009 CPU86_LDouble dblq, fpsrcop, fptemp;
4010 CPU86_LDoubleU fpsrcop1, fptemp1;
4011 int expdif;
4012 signed long long int q;
4014 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4015 ST0 = 0.0 / 0.0; /* NaN */
4016 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4017 return;
4020 fpsrcop = ST0;
4021 fptemp = ST1;
4022 fpsrcop1.d = fpsrcop;
4023 fptemp1.d = fptemp;
4024 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4026 if (expdif < 0) {
4027 /* optimisation? taken from the AMD docs */
4028 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4029 /* ST0 is unchanged */
4030 return;
4033 if (expdif < 53) {
4034 dblq = fpsrcop / fptemp;
4035 /* round dblq towards nearest integer */
4036 dblq = rint(dblq);
4037 ST0 = fpsrcop - fptemp * dblq;
4039 /* convert dblq to q by truncating towards zero */
4040 if (dblq < 0.0)
4041 q = (signed long long int)(-dblq);
4042 else
4043 q = (signed long long int)dblq;
4045 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4046 /* (C0,C3,C1) <-- (q2,q1,q0) */
4047 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4048 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4049 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4050 } else {
4051 env->fpus |= 0x400; /* C2 <-- 1 */
4052 fptemp = pow(2.0, expdif - 50);
4053 fpsrcop = (ST0 / ST1) / fptemp;
4054 /* fpsrcop = integer obtained by chopping */
4055 fpsrcop = (fpsrcop < 0.0) ?
4056 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4057 ST0 -= (ST1 * fpsrcop * fptemp);
4061 void helper_fprem(void)
4063 CPU86_LDouble dblq, fpsrcop, fptemp;
4064 CPU86_LDoubleU fpsrcop1, fptemp1;
4065 int expdif;
4066 signed long long int q;
4068 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4069 ST0 = 0.0 / 0.0; /* NaN */
4070 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4071 return;
4074 fpsrcop = (CPU86_LDouble)ST0;
4075 fptemp = (CPU86_LDouble)ST1;
4076 fpsrcop1.d = fpsrcop;
4077 fptemp1.d = fptemp;
4078 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4080 if (expdif < 0) {
4081 /* optimisation? taken from the AMD docs */
4082 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4083 /* ST0 is unchanged */
4084 return;
4087 if (expdif < 53) {
4088 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4089 /* round dblq towards zero */
4090 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4091 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4093 /* convert dblq to q by truncating towards zero */
4094 if (dblq < 0.0)
4095 q = (signed long long int)(-dblq);
4096 else
4097 q = (signed long long int)dblq;
4099 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4100 /* (C0,C3,C1) <-- (q2,q1,q0) */
4101 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4102 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4103 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4104 } else {
4105 int N = 32 + (expdif % 32); /* as per AMD docs */
4106 env->fpus |= 0x400; /* C2 <-- 1 */
4107 fptemp = pow(2.0, (double)(expdif - N));
4108 fpsrcop = (ST0 / ST1) / fptemp;
4109 /* fpsrcop = integer obtained by chopping */
4110 fpsrcop = (fpsrcop < 0.0) ?
4111 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4112 ST0 -= (ST1 * fpsrcop * fptemp);
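/*
 * FPREM/FPREM1 report the three low quotient bits through C0/C3/C1 so
 * software can finish reducing huge arguments iteratively; the shift
 * constants above place q2/q1/q0 into status-word bits 8, 14 and 9.
 * Isolated sketch of that status update (not part of the build):
 */
#if 0
#include <stdint.h>

static uint16_t demo_fprem_status(uint16_t fpus, unsigned long long q)
{
    fpus &= ~0x4700;                    /* clear C3,C2,C1,C0 */
    fpus |= (q & 0x4) << (8 - 2);       /* C0 (bit 8)  <- q2 */
    fpus |= (q & 0x2) << (14 - 1);      /* C3 (bit 14) <- q1 */
    fpus |= (q & 0x1) << (9 - 0);       /* C1 (bit 9)  <- q0 */
    return fpus;
}
#endif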
4116 void helper_fyl2xp1(void)
4118 CPU86_LDouble fptemp;
4120 fptemp = ST0;
4121 if ((fptemp + 1.0) > 0.0) {
4122 fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
4123 ST1 *= fptemp;
4124 fpop();
4125 } else {
4126 env->fpus &= (~0x4700);
4127 env->fpus |= 0x400;
4131 void helper_fsqrt(void)
4133 CPU86_LDouble fptemp;
4135 fptemp = ST0;
4136 if (fptemp < 0.0) {
4137 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4138 env->fpus |= 0x400;
4140 ST0 = sqrt(fptemp);
4143 void helper_fsincos(void)
4145 CPU86_LDouble fptemp;
4147 fptemp = ST0;
4148 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4149 env->fpus |= 0x400;
4150 } else {
4151 ST0 = sin(fptemp);
4152 fpush();
4153 ST0 = cos(fptemp);
4154 env->fpus &= (~0x400); /* C2 <-- 0 */
4155 /* the above code is for |arg| < 2**63 only */
4159 void helper_frndint(void)
4161 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4164 void helper_fscale(void)
4166 ST0 = ldexp (ST0, (int)(ST1));
4169 void helper_fsin(void)
4171 CPU86_LDouble fptemp;
4173 fptemp = ST0;
4174 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4175 env->fpus |= 0x400;
4176 } else {
4177 ST0 = sin(fptemp);
4178 env->fpus &= (~0x400); /* C2 <-- 0 */
4179 /* the above code is for |arg| < 2**53 only */
4183 void helper_fcos(void)
4185 CPU86_LDouble fptemp;
4187 fptemp = ST0;
4188 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4189 env->fpus |= 0x400;
4190 } else {
4191 ST0 = cos(fptemp);
4192 env->fpus &= (~0x400); /* C2 <-- 0 */
4193 /* the above code is for |arg| < 2**63 only */
4197 void helper_fxam_ST0(void)
4199 CPU86_LDoubleU temp;
4200 int expdif;
4202 temp.d = ST0;
4204 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4205 if (SIGND(temp))
4206 env->fpus |= 0x200; /* C1 <-- 1 */
4208 /* XXX: test fptags too */
4209 expdif = EXPD(temp);
4210 if (expdif == MAXEXPD) {
4211 #ifdef USE_X86LDOUBLE
4212 if (MANTD(temp) == 0x8000000000000000ULL)
4213 #else
4214 if (MANTD(temp) == 0)
4215 #endif
4216 env->fpus |= 0x500 /*Infinity*/;
4217 else
4218 env->fpus |= 0x100 /*NaN*/;
4219 } else if (expdif == 0) {
4220 if (MANTD(temp) == 0)
4221 env->fpus |= 0x4000 /*Zero*/;
4222 else
4223 env->fpus |= 0x4400 /*Denormal*/;
4224 } else {
4225 env->fpus |= 0x400;
4229 void helper_fstenv(target_ulong ptr, int data32)
4231 int fpus, fptag, exp, i;
4232 uint64_t mant;
4233 CPU86_LDoubleU tmp;
4235 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4236 fptag = 0;
4237 for (i=7; i>=0; i--) {
4238 fptag <<= 2;
4239 if (env->fptags[i]) {
4240 fptag |= 3;
4241 } else {
4242 tmp.d = env->fpregs[i].d;
4243 exp = EXPD(tmp);
4244 mant = MANTD(tmp);
4245 if (exp == 0 && mant == 0) {
4246 /* zero */
4247 fptag |= 1;
4248 } else if (exp == 0 || exp == MAXEXPD
4249 #ifdef USE_X86LDOUBLE
4250 || (mant & (1LL << 63)) == 0
4251 #endif
4253 /* NaNs, infinity, denormal */
4254 fptag |= 2;
4258 if (data32) {
4259 /* 32 bit */
4260 stl(ptr, env->fpuc);
4261 stl(ptr + 4, fpus);
4262 stl(ptr + 8, fptag);
4263 stl(ptr + 12, 0); /* fpip */
4264 stl(ptr + 16, 0); /* fpcs */
4265 stl(ptr + 20, 0); /* fpoo */
4266 stl(ptr + 24, 0); /* fpos */
4267 } else {
4268 /* 16 bit */
4269 stw(ptr, env->fpuc);
4270 stw(ptr + 2, fpus);
4271 stw(ptr + 4, fptag);
4272 stw(ptr + 6, 0);
4273 stw(ptr + 8, 0);
4274 stw(ptr + 10, 0);
4275 stw(ptr + 12, 0);
4279 void helper_fldenv(target_ulong ptr, int data32)
4281 int i, fpus, fptag;
4283 if (data32) {
4284 env->fpuc = lduw(ptr);
4285 fpus = lduw(ptr + 4);
4286 fptag = lduw(ptr + 8);
4288 else {
4289 env->fpuc = lduw(ptr);
4290 fpus = lduw(ptr + 2);
4291 fptag = lduw(ptr + 4);
4293 env->fpstt = (fpus >> 11) & 7;
4294 env->fpus = fpus & ~0x3800;
4295 for(i = 0;i < 8; i++) {
4296 env->fptags[i] = ((fptag & 3) == 3);
4297 fptag >>= 2;
4301 void helper_fsave(target_ulong ptr, int data32)
4303 CPU86_LDouble tmp;
4304 int i;
4306 helper_fstenv(ptr, data32);
4308 ptr += (14 << data32);
4309 for(i = 0;i < 8; i++) {
4310 tmp = ST(i);
4311 helper_fstt(tmp, ptr);
4312 ptr += 10;
4315 /* fninit */
4316 env->fpus = 0;
4317 env->fpstt = 0;
4318 env->fpuc = 0x37f;
4319 env->fptags[0] = 1;
4320 env->fptags[1] = 1;
4321 env->fptags[2] = 1;
4322 env->fptags[3] = 1;
4323 env->fptags[4] = 1;
4324 env->fptags[5] = 1;
4325 env->fptags[6] = 1;
4326 env->fptags[7] = 1;
4329 void helper_frstor(target_ulong ptr, int data32)
4331 CPU86_LDouble tmp;
4332 int i;
4334 helper_fldenv(ptr, data32);
4335 ptr += (14 << data32);
4337 for(i = 0;i < 8; i++) {
4338 tmp = helper_fldt(ptr);
4339 ST(i) = tmp;
4340 ptr += 10;
4344 void helper_fxsave(target_ulong ptr, int data64)
4346 int fpus, fptag, i, nb_xmm_regs;
4347 CPU86_LDouble tmp;
4348 target_ulong addr;
4350 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4351 fptag = 0;
4352 for(i = 0; i < 8; i++) {
4353 fptag |= (env->fptags[i] << i);
4355 stw(ptr, env->fpuc);
4356 stw(ptr + 2, fpus);
4357 stw(ptr + 4, fptag ^ 0xff); /* abridged tag word: 1 = valid */
4358 #ifdef TARGET_X86_64
4359 if (data64) {
4360 stq(ptr + 0x08, 0); /* rip */
4361 stq(ptr + 0x10, 0); /* rdp */
4362 } else
4363 #endif
4365 stl(ptr + 0x08, 0); /* eip */
4366 stl(ptr + 0x0c, 0); /* sel */
4367 stl(ptr + 0x10, 0); /* dp */
4368 stl(ptr + 0x14, 0); /* sel */
4371 addr = ptr + 0x20;
4372 for(i = 0;i < 8; i++) {
4373 tmp = ST(i);
4374 helper_fstt(tmp, addr);
4375 addr += 16;
4378 if (env->cr[4] & CR4_OSFXSR_MASK) {
4379 /* XXX: finish it */
4380 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4381 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4382 if (env->hflags & HF_CS64_MASK)
4383 nb_xmm_regs = 16;
4384 else
4385 nb_xmm_regs = 8;
4386 addr = ptr + 0xa0;
4387 /* Fast FXSAVE leaves out the XMM registers */
4388 if (!(env->efer & MSR_EFER_FFXSR)
4389 || (env->hflags & HF_CPL_MASK)
4390 || !(env->hflags & HF_LMA_MASK)) {
4391 for(i = 0; i < nb_xmm_regs; i++) {
4392 stq(addr, env->xmm_regs[i].XMM_Q(0));
4393 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4394 addr += 16;
4400 void helper_fxrstor(target_ulong ptr, int data64)
4402 int i, fpus, fptag, nb_xmm_regs;
4403 CPU86_LDouble tmp;
4404 target_ulong addr;
4406 env->fpuc = lduw(ptr);
4407 fpus = lduw(ptr + 2);
4408 fptag = lduw(ptr + 4);
4409 env->fpstt = (fpus >> 11) & 7;
4410 env->fpus = fpus & ~0x3800;
4411 fptag ^= 0xff;
4412 for(i = 0;i < 8; i++) {
4413 env->fptags[i] = ((fptag >> i) & 1);
4416 addr = ptr + 0x20;
4417 for(i = 0;i < 8; i++) {
4418 tmp = helper_fldt(addr);
4419 ST(i) = tmp;
4420 addr += 16;
4423 if (env->cr[4] & CR4_OSFXSR_MASK) {
4424 /* XXX: finish it */
4425 env->mxcsr = ldl(ptr + 0x18);
4426 //ldl(ptr + 0x1c);
4427 if (env->hflags & HF_CS64_MASK)
4428 nb_xmm_regs = 16;
4429 else
4430 nb_xmm_regs = 8;
4431 addr = ptr + 0xa0;
4432 /* Fast FXRSTOR leaves out the XMM registers */
4433 if (!(env->efer & MSR_EFER_FFXSR)
4434 || (env->hflags & HF_CPL_MASK)
4435 || !(env->hflags & HF_LMA_MASK)) {
4436 for(i = 0; i < nb_xmm_regs; i++) {
4437 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4438 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4439 addr += 16;
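/*
 * Conversion between the 80-bit x87 format (15-bit exponent biased
 * by 16383, 64-bit mantissa with explicit integer bit) and the host
 * CPU86_LDouble. Without a native 80-bit long double the value is
 * widened from / narrowed to an IEEE double (52-bit mantissa,
 * exponent biased by EXPBIAS), losing mantissa precision.
 */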
4445 #ifndef USE_X86LDOUBLE
4447 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4449 CPU86_LDoubleU temp;
4450 int e;
4452 temp.d = f;
4453 /* mantissa */
4454 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4455 /* exponent + sign */
4456 e = EXPD(temp) - EXPBIAS + 16383;
4457 e |= SIGND(temp) >> 16;
4458 *pexp = e;
4461 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4463 CPU86_LDoubleU temp;
4464 int e;
4465 uint64_t ll;
4467 /* XXX: handle overflow? */
4468 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4469 e |= (upper >> 4) & 0x800; /* sign */
4470 ll = (mant >> 11) & ((1LL << 52) - 1);
4471 #ifdef __arm__
4472 temp.l.upper = (e << 20) | (ll >> 32);
4473 temp.l.lower = ll;
4474 #else
4475 temp.ll = ll | ((uint64_t)e << 52);
4476 #endif
4477 return temp.d;
4480 #else
4482 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4484 CPU86_LDoubleU temp;
4486 temp.d = f;
4487 *pmant = temp.l.lower;
4488 *pexp = temp.l.upper;
4491 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4493 CPU86_LDoubleU temp;
4495 temp.l.upper = upper;
4496 temp.l.lower = mant;
4497 return temp.d;
4499 #endif
4501 #ifdef TARGET_X86_64
4503 //#define DEBUG_MULDIV
4505 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4507 *plow += a;
4508 /* carry test */
4509 if (*plow < a)
4510 (*phigh)++;
4511 *phigh += b;
4514 static void neg128(uint64_t *plow, uint64_t *phigh)
4516 *plow = ~ *plow;
4517 *phigh = ~ *phigh;
4518 add128(plow, phigh, 1, 0);
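/*
 * 128/64 -> 64 unsigned division by classic shift-and-subtract: the
 * dividend a1:a0 is shifted left one bit per iteration, b is
 * subtracted from the high half whenever it fits, and the quotient
 * bits are accumulated into a0. If the high half is already >= b,
 * the quotient cannot fit in 64 bits and overflow is reported.
 */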
4521 /* return TRUE if overflow */
4522 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4524 uint64_t q, r, a1, a0;
4525 int i, qb, ab;
4527 a0 = *plow;
4528 a1 = *phigh;
4529 if (a1 == 0) {
4530 q = a0 / b;
4531 r = a0 % b;
4532 *plow = q;
4533 *phigh = r;
4534 } else {
4535 if (a1 >= b)
4536 return 1;
4537 /* XXX: use a better algorithm */
4538 for(i = 0; i < 64; i++) {
4539 ab = a1 >> 63;
4540 a1 = (a1 << 1) | (a0 >> 63);
4541 if (ab || a1 >= b) {
4542 a1 -= b;
4543 qb = 1;
4544 } else {
4545 qb = 0;
4547 a0 = (a0 << 1) | qb;
4549 #if defined(DEBUG_MULDIV)
4550 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4551 *phigh, *plow, b, a0, a1);
4552 #endif
4553 *plow = a0;
4554 *phigh = a1;
4556 return 0;
4559 /* return TRUE if overflow */
4560 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4562 int sa, sb;
4563 sa = ((int64_t)*phigh < 0);
4564 if (sa)
4565 neg128(plow, phigh);
4566 sb = (b < 0);
4567 if (sb)
4568 b = -b;
4569 if (div64(plow, phigh, b) != 0)
4570 return 1;
4571 if (sa ^ sb) {
4572 if (*plow > (1ULL << 63))
4573 return 1;
4574 *plow = - *plow;
4575 } else {
4576 if (*plow >= (1ULL << 63))
4577 return 1;
4579 if (sa)
4580 *phigh = - *phigh;
4581 return 0;
4584 void helper_mulq_EAX_T0(target_ulong t0)
4586 uint64_t r0, r1;
4588 mulu64(&r0, &r1, EAX, t0);
4589 EAX = r0;
4590 EDX = r1;
4591 CC_DST = r0;
4592 CC_SRC = r1;
4595 void helper_imulq_EAX_T0(target_ulong t0)
4597 uint64_t r0, r1;
4599 muls64(&r0, &r1, EAX, t0);
4600 EAX = r0;
4601 EDX = r1;
4602 CC_DST = r0;
4603 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4606 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4608 uint64_t r0, r1;
4610 muls64(&r0, &r1, t0, t1);
4611 CC_DST = r0;
4612 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4613 return r0;
4616 void helper_divq_EAX(target_ulong t0)
4618 uint64_t r0, r1;
4619 if (t0 == 0) {
4620 raise_exception(EXCP00_DIVZ);
4622 r0 = EAX;
4623 r1 = EDX;
4624 if (div64(&r0, &r1, t0))
4625 raise_exception(EXCP00_DIVZ);
4626 EAX = r0;
4627 EDX = r1;
4630 void helper_idivq_EAX(target_ulong t0)
4632 uint64_t r0, r1;
4633 if (t0 == 0) {
4634 raise_exception(EXCP00_DIVZ);
4636 r0 = EAX;
4637 r1 = EDX;
4638 if (idiv64(&r0, &r1, t0))
4639 raise_exception(EXCP00_DIVZ);
4640 EAX = r0;
4641 EDX = r1;
4643 #endif
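/*
 * HLT parks the virtual CPU: halted is set and the execution loop is
 * left with EXCP_HLT until an interrupt arrives. The interrupt
 * inhibit flag is cleared first so that a preceding STI cannot leave
 * the CPU in the interrupt shadow while it sleeps.
 */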
4645 static void do_hlt(void)
4647 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if STI was executed just before */
4648 env->halted = 1;
4649 env->exception_index = EXCP_HLT;
4650 cpu_loop_exit();
4653 void helper_hlt(int next_eip_addend)
4655 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4656 EIP += next_eip_addend;
4658 do_hlt();
4661 void helper_monitor(target_ulong ptr)
4663 if ((uint32_t)ECX != 0)
4664 raise_exception(EXCP0D_GPF);
4665 /* XXX: store address ? */
4666 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4669 void helper_mwait(int next_eip_addend)
4671 if ((uint32_t)ECX != 0)
4672 raise_exception(EXCP0D_GPF);
4673 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4674 EIP += next_eip_addend;
4676 /* XXX: not complete but not completely erroneous */
4677 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4678 /* more than one CPU: do not sleep because another CPU may
4679 wake this one */
4680 } else {
4681 do_hlt();
4685 void helper_debug(void)
4687 env->exception_index = EXCP_DEBUG;
4688 cpu_loop_exit();
4691 void helper_raise_interrupt(int intno, int next_eip_addend)
4693 raise_interrupt(intno, 1, 0, next_eip_addend);
4696 void helper_raise_exception(int exception_index)
4698 raise_exception(exception_index);
4701 void helper_cli(void)
4703 env->eflags &= ~IF_MASK;
4706 void helper_sti(void)
4708 env->eflags |= IF_MASK;
4711 #if 0
4712 /* vm86plus instructions */
4713 void helper_cli_vm(void)
4715 env->eflags &= ~VIF_MASK;
4718 void helper_sti_vm(void)
4720 env->eflags |= VIF_MASK;
4721 if (env->eflags & VIP_MASK) {
4722 raise_exception(EXCP0D_GPF);
4725 #endif
4727 void helper_set_inhibit_irq(void)
4729 env->hflags |= HF_INHIBIT_IRQ_MASK;
4732 void helper_reset_inhibit_irq(void)
4734 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
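/*
 * BOUND compares a signed index against a pair of signed limits at
 * a0 (lower) and a0 + operand size (upper) and raises #BR
 * (EXCP05_BOUND) when the index lies outside [low, high].
 */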
4737 void helper_boundw(target_ulong a0, int v)
4739 int low, high;
4740 low = ldsw(a0);
4741 high = ldsw(a0 + 2);
4742 v = (int16_t)v;
4743 if (v < low || v > high) {
4744 raise_exception(EXCP05_BOUND);
4748 void helper_boundl(target_ulong a0, int v)
4750 int low, high;
4751 low = ldl(a0);
4752 high = ldl(a0 + 4);
4753 if (v < low || v > high) {
4754 raise_exception(EXCP05_BOUND);
4758 static float approx_rsqrt(float a)
4760 return 1.0 / sqrt(a);
4763 static float approx_rcp(float a)
4765 return 1.0 / a;
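/*
 * softmmu_template.h is included once per access size (SHIFT is the
 * log2 of the width, MMUSUFFIX names the generated symbols); each
 * inclusion produces the slow-path load/store routines used when a
 * guest memory access misses the software TLB.
 */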
4768 #if !defined(CONFIG_USER_ONLY)
4770 #define MMUSUFFIX _mmu
4772 #define SHIFT 0
4773 #include "softmmu_template.h"
4775 #define SHIFT 1
4776 #include "softmmu_template.h"
4778 #define SHIFT 2
4779 #include "softmmu_template.h"
4781 #define SHIFT 3
4782 #include "softmmu_template.h"
4784 #endif
4786 #if !defined(CONFIG_USER_ONLY)
4787 /* try to fill the TLB and raise an exception on error. If retaddr is
4788 NULL, it means that the function was called from C code (i.e. not
4789 from generated code or from helper.c) */
4790 /* XXX: fix it to restore all registers */
4791 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4793 TranslationBlock *tb;
4794 int ret;
4795 unsigned long pc;
4796 CPUX86State *saved_env;
4798 /* XXX: hack to restore env in all cases, even if not called from
4799 generated code */
4800 saved_env = env;
4801 env = cpu_single_env;
4803 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4804 if (ret) {
4805 if (retaddr) {
4806 /* now we have a real cpu fault */
4807 pc = (unsigned long)retaddr;
4808 tb = tb_find_pc(pc);
4809 if (tb) {
4810 /* the PC is inside the translated code. It means that we have
4811 a virtual CPU fault */
4812 cpu_restore_state(tb, env, pc, NULL);
4815 raise_exception_err(env->exception_index, env->error_code);
4817 env = saved_env;
4819 #endif
4821 /* Secure Virtual Machine helpers */
4823 #if defined(CONFIG_USER_ONLY)
4825 void helper_vmrun(int aflag, int next_eip_addend)
4828 void helper_vmmcall(void)
4831 void helper_vmload(int aflag)
4834 void helper_vmsave(int aflag)
4837 void helper_stgi(void)
4840 void helper_clgi(void)
4843 void helper_skinit(void)
4846 void helper_invlpga(int aflag)
4849 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4852 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4856 void helper_svm_check_io(uint32_t port, uint32_t param,
4857 uint32_t next_eip_addend)
4860 #else
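/*
 * The VMCB encodes segment attributes in a packed 12-bit field:
 * descriptor flag bits 8-15 map to attrib bits 0-7 and flag bits
 * 20-23 (AVL/L/DB/G) to attrib bits 8-11. svm_save_seg and
 * svm_load_seg translate between that layout and SegmentCache flags.
 */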
4862 static inline void svm_save_seg(target_phys_addr_t addr,
4863 const SegmentCache *sc)
4865 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4866 sc->selector);
4867 stq_phys(addr + offsetof(struct vmcb_seg, base),
4868 sc->base);
4869 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4870 sc->limit);
4871 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4872 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4875 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4877 unsigned int flags;
4879 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4880 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4881 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4882 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4883 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4886 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4887 CPUState *env, int seg_reg)
4889 SegmentCache sc1, *sc = &sc1;
4890 svm_load_seg(addr, sc);
4891 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4892 sc->base, sc->limit, sc->flags);
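/*
 * VMRUN: save the host state to the hsave page, load the guest state
 * from the VMCB addressed by rAX, cache the intercept bitmaps in
 * CPUState so the intercept checks never touch guest memory, and
 * finally inject any event pending in the VMCB EVENTINJ field.
 */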
4895 void helper_vmrun(int aflag, int next_eip_addend)
4897 target_ulong addr;
4898 uint32_t event_inj;
4899 uint32_t int_ctl;
4901 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4903 if (aflag == 2)
4904 addr = EAX;
4905 else
4906 addr = (uint32_t)EAX;
4908 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4910 env->vm_vmcb = addr;
4912 /* save the current CPU state in the hsave page */
4913 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4914 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4916 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4917 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4919 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4920 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4921 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4922 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4923 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4924 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4926 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4927 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4929 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4930 &env->segs[R_ES]);
4931 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4932 &env->segs[R_CS]);
4933 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4934 &env->segs[R_SS]);
4935 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4936 &env->segs[R_DS]);
4938 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4939 EIP + next_eip_addend);
4940 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4941 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4943 /* load the interception bitmaps so we do not need to access the
4944 vmcb in svm mode */
4945 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4946 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4947 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4948 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4949 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4950 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4952 /* enable intercepts */
4953 env->hflags |= HF_SVMI_MASK;
4955 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4957 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4958 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4960 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4961 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4963 /* clear exit_info_2 so we behave like the real hardware */
4964 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4966 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4967 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4968 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4969 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4970 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4971 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4972 if (int_ctl & V_INTR_MASKING_MASK) {
4973 env->v_tpr = int_ctl & V_TPR_MASK;
4974 env->hflags2 |= HF2_VINTR_MASK;
4975 if (env->eflags & IF_MASK)
4976 env->hflags2 |= HF2_HIF_MASK;
4979 cpu_load_efer(env,
4980 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4981 env->eflags = 0;
4982 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4983 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4984 CC_OP = CC_OP_EFLAGS;
4986 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4987 env, R_ES);
4988 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4989 env, R_CS);
4990 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4991 env, R_SS);
4992 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4993 env, R_DS);
4995 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4996 env->eip = EIP;
4997 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4998 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4999 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5000 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5001 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5003 /* FIXME: guest state consistency checks */
5005 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5006 case TLB_CONTROL_DO_NOTHING:
5007 break;
5008 case TLB_CONTROL_FLUSH_ALL_ASID:
5009 /* FIXME: this is not 100% correct but should work for now */
5010 tlb_flush(env, 1);
5011 break;
5014 env->hflags2 |= HF2_GIF_MASK;
5016 if (int_ctl & V_IRQ_MASK) {
5017 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5020 /* maybe we need to inject an event */
5021 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5022 if (event_inj & SVM_EVTINJ_VALID) {
5023 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5024 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5025 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5027 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5028 /* FIXME: need to implement valid_err */
5029 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5030 case SVM_EVTINJ_TYPE_INTR:
5031 env->exception_index = vector;
5032 env->error_code = event_inj_err;
5033 env->exception_is_int = 0;
5034 env->exception_next_eip = -1;
5035 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5036 /* XXX: is it always correct? */
5037 do_interrupt(vector, 0, 0, 0, 1);
5038 break;
5039 case SVM_EVTINJ_TYPE_NMI:
5040 env->exception_index = EXCP02_NMI;
5041 env->error_code = event_inj_err;
5042 env->exception_is_int = 0;
5043 env->exception_next_eip = EIP;
5044 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5045 cpu_loop_exit();
5046 break;
5047 case SVM_EVTINJ_TYPE_EXEPT:
5048 env->exception_index = vector;
5049 env->error_code = event_inj_err;
5050 env->exception_is_int = 0;
5051 env->exception_next_eip = -1;
5052 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5053 cpu_loop_exit();
5054 break;
5055 case SVM_EVTINJ_TYPE_SOFT:
5056 env->exception_index = vector;
5057 env->error_code = event_inj_err;
5058 env->exception_is_int = 1;
5059 env->exception_next_eip = EIP;
5060 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5061 cpu_loop_exit();
5062 break;
5064 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5068 void helper_vmmcall(void)
5070 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5071 raise_exception(EXCP06_ILLOP);
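/*
 * VMLOAD/VMSAVE transfer the state that VMRUN and #VMEXIT leave
 * alone: FS, GS, TR and LDTR with their hidden descriptor state, the
 * SYSENTER MSRs and, on x86_64, KernelGSBase plus the syscall MSRs.
 */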
5074 void helper_vmload(int aflag)
5076 target_ulong addr;
5077 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5079 if (aflag == 2)
5080 addr = EAX;
5081 else
5082 addr = (uint32_t)EAX;
5084 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5085 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5086 env->segs[R_FS].base);
5088 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5089 env, R_FS);
5090 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5091 env, R_GS);
5092 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5093 &env->tr);
5094 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5095 &env->ldt);
5097 #ifdef TARGET_X86_64
5098 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5099 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5100 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5101 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5102 #endif
5103 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5104 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5105 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5106 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5109 void helper_vmsave(int aflag)
5111 target_ulong addr;
5112 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5114 if (aflag == 2)
5115 addr = EAX;
5116 else
5117 addr = (uint32_t)EAX;
5119 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5120 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5121 env->segs[R_FS].base);
5123 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5124 &env->segs[R_FS]);
5125 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5126 &env->segs[R_GS]);
5127 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5128 &env->tr);
5129 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5130 &env->ldt);
5132 #ifdef TARGET_X86_64
5133 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5134 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5135 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5136 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5137 #endif
5138 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5139 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5140 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5141 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5144 void helper_stgi(void)
5146 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5147 env->hflags2 |= HF2_GIF_MASK;
5150 void helper_clgi(void)
5152 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5153 env->hflags2 &= ~HF2_GIF_MASK;
5156 void helper_skinit(void)
5158 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5159 /* XXX: not implemented */
5160 raise_exception(EXCP06_ILLOP);
5163 void helper_invlpga(int aflag)
5165 target_ulong addr;
5166 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5168 if (aflag == 2)
5169 addr = EAX;
5170 else
5171 addr = (uint32_t)EAX;
5173 /* XXX: could use the ASID to decide whether the flush is
5174 actually needed */
5175 tlb_flush_page(env, addr);
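/*
 * Every interceptable operation maps to a bit in one of the bitmaps
 * cached at VMRUN time. MSR accesses consult the 2-bits-per-MSR
 * permission map instead: param selects the read (0) or write (1)
 * bit, and the three architectural MSR ranges are mapped onto
 * consecutive 2K regions of the bitmap.
 */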
5178 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5180 if (likely(!(env->hflags & HF_SVMI_MASK)))
5181 return;
5182 switch(type) {
5183 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5184 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5185 helper_vmexit(type, param);
5187 break;
5188 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5189 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5190 helper_vmexit(type, param);
5192 break;
5193 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5194 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5195 helper_vmexit(type, param);
5197 break;
5198 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5199 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5200 helper_vmexit(type, param);
5202 break;
5203 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5204 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5205 helper_vmexit(type, param);
5207 break;
5208 case SVM_EXIT_MSR:
5209 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5210 /* FIXME: this should be read in at vmrun (faster this way?) */
5211 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5212 uint32_t t0, t1;
5213 switch((uint32_t)ECX) {
5214 case 0 ... 0x1fff:
5215 t0 = (ECX * 2) % 8;
5216 t1 = (ECX * 2) / 8; /* 2 bits per MSR */
5217 break;
5218 case 0xc0000000 ... 0xc0001fff:
5219 t0 = (8192 + ECX - 0xc0000000) * 2;
5220 t1 = (t0 / 8);
5221 t0 %= 8;
5222 break;
5223 case 0xc0010000 ... 0xc0011fff:
5224 t0 = (16384 + ECX - 0xc0010000) * 2;
5225 t1 = (t0 / 8);
5226 t0 %= 8;
5227 break;
5228 default:
5229 helper_vmexit(type, param);
5230 t0 = 0;
5231 t1 = 0;
5232 break;
5234 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5235 helper_vmexit(type, param);
5237 break;
5238 default:
5239 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5240 helper_vmexit(type, param);
5242 break;
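/*
 * I/O intercepts consult the IOPM, one bit per port; an access wider
 * than one byte must check every port it covers, hence the mask
 * built from the operand size encoded in bits 4-6 of param.
 */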
5246 void helper_svm_check_io(uint32_t port, uint32_t param,
5247 uint32_t next_eip_addend)
5249 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5250 /* FIXME: this should be read in at vmrun (faster this way?) */
5251 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5252 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5253 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5254 /* next EIP */
5255 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5256 env->eip + next_eip_addend);
5257 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
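/*
 * #VMEXIT is the inverse of VMRUN: the guest state is written back
 * to the VMCB along with the exit code and exit information, the
 * cached intercepts are dropped, and the host state is reloaded from
 * the hsave page. The comments below track the AMD APM list of
 * #VMEXIT side effects, implemented or not.
 */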
5262 /* Note: currently only 32 bits of exit_code are used */
5263 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5265 uint32_t int_ctl;
5267 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5268 exit_code, exit_info_1,
5269 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5270 EIP);
5272 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5273 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5274 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5275 } else {
5276 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5279 /* Save the VM state in the vmcb */
5280 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5281 &env->segs[R_ES]);
5282 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5283 &env->segs[R_CS]);
5284 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5285 &env->segs[R_SS]);
5286 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5287 &env->segs[R_DS]);
5289 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5290 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5292 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5293 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5295 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5296 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5297 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5298 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5299 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5301 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5302 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5303 int_ctl |= env->v_tpr & V_TPR_MASK;
5304 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5305 int_ctl |= V_IRQ_MASK;
5306 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5308 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5309 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5310 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5311 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5312 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5314 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5316 /* Reload the host state from vm_hsave */
5317 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5318 env->hflags &= ~HF_SVMI_MASK;
5319 env->intercept = 0;
5320 env->intercept_exceptions = 0;
5321 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5322 env->tsc_offset = 0;
5324 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5325 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5327 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5328 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5330 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5331 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5332 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5333 /* we need to set the efer after the crs so the hidden flags get
5334 set properly */
5335 cpu_load_efer(env,
5336 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5337 env->eflags = 0;
5338 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5339 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5340 CC_OP = CC_OP_EFLAGS;
5342 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5343 env, R_ES);
5344 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5345 env, R_CS);
5346 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5347 env, R_SS);
5348 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5349 env, R_DS);
5351 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5352 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5353 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5355 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5356 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5358 /* other setups */
5359 cpu_x86_set_cpl(env, 0);
5360 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5361 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5363 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5364 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5365 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5366 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5368 env->hflags2 &= ~HF2_GIF_MASK;
5369 /* FIXME: Resets the current ASID register to zero (host ASID). */
5371 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5373 /* Clears the TSC_OFFSET inside the processor. */
5375 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5376 from the page table indicated by the host's CR3. If the PDPEs contain
5377 illegal state, the processor causes a shutdown. */
5379 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5380 env->cr[0] |= CR0_PE_MASK;
5381 env->eflags &= ~VM_MASK;
5383 /* Disables all breakpoints in the host DR7 register. */
5385 /* Checks the reloaded host state for consistency. */
5387 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5388 host's code segment or non-canonical (in the case of long mode), a
5389 #GP fault is delivered inside the host. */
5391 /* remove any pending exception */
5392 env->exception_index = -1;
5393 env->error_code = 0;
5394 env->old_exception = -1;
5396 cpu_loop_exit();
5399 #endif
5401 /* MMX/SSE */
5402 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
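/*
 * MMX aliases the x87 register file: entering MMX mode resets the
 * stack top and marks all eight registers valid, while EMMS marks
 * them empty. fptags[] is a byte array, so two 32-bit stores update
 * all eight tags at once (0x01 per byte = empty, 0x00 = valid).
 */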
5403 void helper_enter_mmx(void)
5405 env->fpstt = 0;
5406 *(uint32_t *)(env->fptags) = 0;
5407 *(uint32_t *)(env->fptags + 4) = 0;
5410 void helper_emms(void)
5412 /* set to empty state */
5413 *(uint32_t *)(env->fptags) = 0x01010101;
5414 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5417 /* XXX: suppress this helper (the move could be done inline) */
5418 void helper_movq(void *d, void *s)
5420 *(uint64_t *)d = *(uint64_t *)s;
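/* ops_sse.h is included twice: SHIFT 0 generates the 64-bit MMX
   variants of the vector helpers, SHIFT 1 the 128-bit SSE ones. */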
5423 #define SHIFT 0
5424 #include "ops_sse.h"
5426 #define SHIFT 1
5427 #include "ops_sse.h"
5429 #define SHIFT 0
5430 #include "helper_template.h"
5431 #undef SHIFT
5433 #define SHIFT 1
5434 #include "helper_template.h"
5435 #undef SHIFT
5437 #define SHIFT 2
5438 #include "helper_template.h"
5439 #undef SHIFT
5441 #ifdef TARGET_X86_64
5443 #define SHIFT 3
5444 #include "helper_template.h"
5445 #undef SHIFT
5447 #endif
5449 /* bit operations */
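/*
 * Linear-scan implementations of BSF/BSR. Note that the loops never
 * terminate for a zero source, so the translated code must only call
 * these helpers when the source is known to be non-zero (the
 * architectural result is undefined in that case anyway).
 */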
5450 target_ulong helper_bsf(target_ulong t0)
5452 int count;
5453 target_ulong res;
5455 res = t0;
5456 count = 0;
5457 while ((res & 1) == 0) {
5458 count++;
5459 res >>= 1;
5461 return count;
5464 target_ulong helper_bsr(target_ulong t0)
5466 int count;
5467 target_ulong res, mask;
5469 res = t0;
5470 count = TARGET_LONG_BITS - 1;
5471 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5472 while ((res & mask) == 0) {
5473 count--;
5474 res <<= 1;
5476 return count;
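/*
 * Lazy condition codes: the translator records the last flag-setting
 * operation in CC_OP and its operands in CC_SRC/CC_DST, and the
 * EFLAGS bits are only materialized here when something reads them.
 * The per-size compute_all_* / compute_c_* workers come from the
 * helper_template.h inclusions above.
 */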
5480 static int compute_all_eflags(void)
5482 return CC_SRC;
5485 static int compute_c_eflags(void)
5487 return CC_SRC & CC_C;
5490 uint32_t helper_cc_compute_all(int op)
5492 switch (op) {
5493 default: /* should never happen */ return 0;
5495 case CC_OP_EFLAGS: return compute_all_eflags();
5497 case CC_OP_MULB: return compute_all_mulb();
5498 case CC_OP_MULW: return compute_all_mulw();
5499 case CC_OP_MULL: return compute_all_mull();
5501 case CC_OP_ADDB: return compute_all_addb();
5502 case CC_OP_ADDW: return compute_all_addw();
5503 case CC_OP_ADDL: return compute_all_addl();
5505 case CC_OP_ADCB: return compute_all_adcb();
5506 case CC_OP_ADCW: return compute_all_adcw();
5507 case CC_OP_ADCL: return compute_all_adcl();
5509 case CC_OP_SUBB: return compute_all_subb();
5510 case CC_OP_SUBW: return compute_all_subw();
5511 case CC_OP_SUBL: return compute_all_subl();
5513 case CC_OP_SBBB: return compute_all_sbbb();
5514 case CC_OP_SBBW: return compute_all_sbbw();
5515 case CC_OP_SBBL: return compute_all_sbbl();
5517 case CC_OP_LOGICB: return compute_all_logicb();
5518 case CC_OP_LOGICW: return compute_all_logicw();
5519 case CC_OP_LOGICL: return compute_all_logicl();
5521 case CC_OP_INCB: return compute_all_incb();
5522 case CC_OP_INCW: return compute_all_incw();
5523 case CC_OP_INCL: return compute_all_incl();
5525 case CC_OP_DECB: return compute_all_decb();
5526 case CC_OP_DECW: return compute_all_decw();
5527 case CC_OP_DECL: return compute_all_decl();
5529 case CC_OP_SHLB: return compute_all_shlb();
5530 case CC_OP_SHLW: return compute_all_shlw();
5531 case CC_OP_SHLL: return compute_all_shll();
5533 case CC_OP_SARB: return compute_all_sarb();
5534 case CC_OP_SARW: return compute_all_sarw();
5535 case CC_OP_SARL: return compute_all_sarl();
5537 #ifdef TARGET_X86_64
5538 case CC_OP_MULQ: return compute_all_mulq();
5540 case CC_OP_ADDQ: return compute_all_addq();
5542 case CC_OP_ADCQ: return compute_all_adcq();
5544 case CC_OP_SUBQ: return compute_all_subq();
5546 case CC_OP_SBBQ: return compute_all_sbbq();
5548 case CC_OP_LOGICQ: return compute_all_logicq();
5550 case CC_OP_INCQ: return compute_all_incq();
5552 case CC_OP_DECQ: return compute_all_decq();
5554 case CC_OP_SHLQ: return compute_all_shlq();
5556 case CC_OP_SARQ: return compute_all_sarq();
5557 #endif
5561 uint32_t helper_cc_compute_c(int op)
5563 switch (op) {
5564 default: /* should never happen */ return 0;
5566 case CC_OP_EFLAGS: return compute_c_eflags();
5568 case CC_OP_MULB: return compute_c_mull();
5569 case CC_OP_MULW: return compute_c_mull();
5570 case CC_OP_MULL: return compute_c_mull();
5572 case CC_OP_ADDB: return compute_c_addb();
5573 case CC_OP_ADDW: return compute_c_addw();
5574 case CC_OP_ADDL: return compute_c_addl();
5576 case CC_OP_ADCB: return compute_c_adcb();
5577 case CC_OP_ADCW: return compute_c_adcw();
5578 case CC_OP_ADCL: return compute_c_adcl();
5580 case CC_OP_SUBB: return compute_c_subb();
5581 case CC_OP_SUBW: return compute_c_subw();
5582 case CC_OP_SUBL: return compute_c_subl();
5584 case CC_OP_SBBB: return compute_c_sbbb();
5585 case CC_OP_SBBW: return compute_c_sbbw();
5586 case CC_OP_SBBL: return compute_c_sbbl();
5588 case CC_OP_LOGICB: return compute_c_logicb();
5589 case CC_OP_LOGICW: return compute_c_logicw();
5590 case CC_OP_LOGICL: return compute_c_logicl();
5592 case CC_OP_INCB: return compute_c_incl();
5593 case CC_OP_INCW: return compute_c_incl();
5594 case CC_OP_INCL: return compute_c_incl();
5596 case CC_OP_DECB: return compute_c_incl();
5597 case CC_OP_DECW: return compute_c_incl();
5598 case CC_OP_DECL: return compute_c_incl();
5600 case CC_OP_SHLB: return compute_c_shlb();
5601 case CC_OP_SHLW: return compute_c_shlw();
5602 case CC_OP_SHLL: return compute_c_shll();
5604 case CC_OP_SARB: return compute_c_sarl();
5605 case CC_OP_SARW: return compute_c_sarl();
5606 case CC_OP_SARL: return compute_c_sarl();
5608 #ifdef TARGET_X86_64
5609 case CC_OP_MULQ: return compute_c_mull();
5611 case CC_OP_ADDQ: return compute_c_addq();
5613 case CC_OP_ADCQ: return compute_c_adcq();
5615 case CC_OP_SUBQ: return compute_c_subq();
5617 case CC_OP_SBBQ: return compute_c_sbbq();
5619 case CC_OP_LOGICQ: return compute_c_logicq();
5621 case CC_OP_INCQ: return compute_c_incl();
5623 case CC_OP_DECQ: return compute_c_incl();
5625 case CC_OP_SHLQ: return compute_c_shlq();
5627 case CC_OP_SARQ: return compute_c_sarl();
5628 #endif