target-i386/op_helper.c
1 /*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
19 */
20 #define CPU_NO_GLOBAL_REGS
21 #include "exec.h"
22 #include "exec-all.h"
23 #include "host-utils.h"
25 //#define DEBUG_PCALL
28 #ifdef DEBUG_PCALL
29 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30 # define LOG_PCALL_STATE(env) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
32 #else
33 # define LOG_PCALL(...) do { } while (0)
34 # define LOG_PCALL_STATE(env) do { } while (0)
35 #endif
38 #if 0
39 #define raise_exception_err(a, b)\
40 do {\
41 qemu_log("raise_exception line=%d\n", __LINE__);\
42 (raise_exception_err)(a, b);\
43 } while (0)
44 #endif
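/* PF is defined over the low 8 bits of a result: it is set when that byte
 * contains an even number of 1 bits. This table precomputes CC_P for every
 * byte value, e.g. parity_table[0x03] == CC_P (two bits set) while
 * parity_table[0x07] == 0 (three bits set). */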
46 static const uint8_t parity_table[256] = {
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
67 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
68 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
75 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
76 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
81 /* modulo 17 table */
82 static const uint8_t rclw_table[32] = {
83 0, 1, 2, 3, 4, 5, 6, 7,
84 8, 9,10,11,12,13,14,15,
85 16, 0, 1, 2, 3, 4, 5, 6,
86 7, 8, 9,10,11,12,13,14,
89 /* modulo 9 table */
90 static const uint8_t rclb_table[32] = {
91 0, 1, 2, 3, 4, 5, 6, 7,
92 8, 0, 1, 2, 3, 4, 5, 6,
93 7, 8, 0, 1, 2, 3, 4, 5,
94 6, 7, 8, 0, 1, 2, 3, 4,
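/* RCL/RCR rotate through the carry flag, so an 8-bit rotate has a period of
 * 9 and a 16-bit rotate a period of 17; the tables above reduce the rotate
 * count modulo those periods (rclb_table for byte, rclw_table for word
 * operands). */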
97 static const CPU86_LDouble f15rk[7] =
99 0.00000000000000000000L,
100 1.00000000000000000000L,
101 3.14159265358979323851L, /*pi*/
102 0.30102999566398119523L, /*lg2*/
103 0.69314718055994530943L, /*ln2*/
104 1.44269504088896340739L, /*l2e*/
105 3.32192809488736234781L, /*l2t*/
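/* constant pool used by the FPU load-constant helpers: 0.0 and 1.0 for
 * FLDZ/FLD1, then pi, log10(2), ln(2), log2(e) and log2(10) for FLDPI,
 * FLDLG2, FLDLN2, FLDL2E and FLDL2T */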
108 /* broken thread support */
110 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
112 void helper_lock(void)
114 spin_lock(&global_cpu_lock);
117 void helper_unlock(void)
119 spin_unlock(&global_cpu_lock);
122 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
124 load_eflags(t0, update_mask);
127 target_ulong helper_read_eflags(void)
129 uint32_t eflags;
130 eflags = helper_cc_compute_all(CC_OP);
131 eflags |= (DF & DF_MASK);
132 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
133 return eflags;
136 /* return non-zero on error */
137 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
138 int selector)
140 SegmentCache *dt;
141 int index;
142 target_ulong ptr;
144 if (selector & 0x4)
145 dt = &env->ldt;
146 else
147 dt = &env->gdt;
148 index = selector & ~7;
149 if ((index + 7) > dt->limit)
150 return -1;
151 ptr = dt->base + index;
152 *e1_ptr = ldl_kernel(ptr);
153 *e2_ptr = ldl_kernel(ptr + 4);
154 return 0;
157 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
159 unsigned int limit;
160 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161 if (e2 & DESC_G_MASK)
162 limit = (limit << 12) | 0xfff;
163 return limit;
166 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
168 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
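/* e1/e2 are the two 32-bit words of a segment descriptor: the limit is split
 * between e1[15:0] and e2[19:16] (scaled by 4K when the G bit is set), and
 * the base is assembled from e1[31:16], e2[7:0] and e2[31:24] */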
171 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
173 sc->base = get_seg_base(e1, e2);
174 sc->limit = get_seg_limit(e1, e2);
175 sc->flags = e2;
178 /* init the segment cache in vm86 mode. */
179 static inline void load_seg_vm(int seg, int selector)
181 selector &= 0xffff;
182 cpu_x86_load_seg_cache(env, seg, selector,
183 (selector << 4), 0xffff, 0);
186 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
187 uint32_t *esp_ptr, int dpl)
189 int type, index, shift;
191 #if 0
193 int i;
194 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
195 for(i=0;i<env->tr.limit;i++) {
196 printf("%02x ", env->tr.base[i]);
197 if ((i & 7) == 7) printf("\n");
199 printf("\n");
201 #endif
203 if (!(env->tr.flags & DESC_P_MASK))
204 cpu_abort(env, "invalid tss");
205 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
206 if ((type & 7) != 1)
207 cpu_abort(env, "invalid tss type");
208 shift = type >> 3;
209 index = (dpl * 4 + 2) << shift;
210 if (index + (4 << shift) - 1 > env->tr.limit)
211 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
212 if (shift == 0) {
213 *esp_ptr = lduw_kernel(env->tr.base + index);
214 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
215 } else {
216 *esp_ptr = ldl_kernel(env->tr.base + index);
217 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
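/* the TSS holds one stack pointer per privilege level: in a 32-bit TSS each
 * {ESP, SS} pair takes 8 bytes starting at offset 4 (hence
 * (dpl * 4 + 2) << shift above), in a 16-bit TSS each pair takes 4 bytes
 * starting at offset 2 */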
221 /* XXX: merge with load_seg() */
222 static void tss_load_seg(int seg_reg, int selector)
224 uint32_t e1, e2;
225 int rpl, dpl, cpl;
227 if ((selector & 0xfffc) != 0) {
228 if (load_segment(&e1, &e2, selector) != 0)
229 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230 if (!(e2 & DESC_S_MASK))
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232 rpl = selector & 3;
233 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
234 cpl = env->hflags & HF_CPL_MASK;
235 if (seg_reg == R_CS) {
236 if (!(e2 & DESC_CS_MASK))
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 /* XXX: is it correct ? */
239 if (dpl != rpl)
240 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241 if ((e2 & DESC_C_MASK) && dpl > rpl)
242 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243 } else if (seg_reg == R_SS) {
244 /* SS must be writable data */
245 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
246 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
247 if (dpl != cpl || dpl != rpl)
248 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
249 } else {
250 /* not readable code */
251 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
252 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
253 /* if data or non-conforming code, check the access rights */
254 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
255 if (dpl < cpl || dpl < rpl)
256 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
259 if (!(e2 & DESC_P_MASK))
260 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
261 cpu_x86_load_seg_cache(env, seg_reg, selector,
262 get_seg_base(e1, e2),
263 get_seg_limit(e1, e2),
264 e2);
265 } else {
266 if (seg_reg == R_SS || seg_reg == R_CS)
267 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
271 #define SWITCH_TSS_JMP 0
272 #define SWITCH_TSS_IRET 1
273 #define SWITCH_TSS_CALL 2
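/* switch_tss() below implements a hardware task switch. "source" records how
 * the switch was triggered (JMP or CALL through a TSS/task gate, or IRET with
 * NT set): CALL links back to the old task by storing the old TR selector in
 * the new TSS and setting NT, while JMP and IRET clear the busy bit of the
 * outgoing task so it can be re-entered later. */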
275 /* XXX: restore CPU state in registers (PowerPC case) */
276 static void switch_tss(int tss_selector,
277 uint32_t e1, uint32_t e2, int source,
278 uint32_t next_eip)
280 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281 target_ulong tss_base;
282 uint32_t new_regs[8], new_segs[6];
283 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284 uint32_t old_eflags, eflags_mask;
285 SegmentCache *dt;
286 int index;
287 target_ulong ptr;
289 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
292 /* if task gate, we read the TSS segment and we load it */
293 if (type == 5) {
294 if (!(e2 & DESC_P_MASK))
295 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296 tss_selector = e1 >> 16;
297 if (tss_selector & 4)
298 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299 if (load_segment(&e1, &e2, tss_selector) != 0)
300 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301 if (e2 & DESC_S_MASK)
302 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
304 if ((type & 7) != 1)
305 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
308 if (!(e2 & DESC_P_MASK))
309 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
311 if (type & 8)
312 tss_limit_max = 103;
313 else
314 tss_limit_max = 43;
315 tss_limit = get_seg_limit(e1, e2);
316 tss_base = get_seg_base(e1, e2);
317 if ((tss_selector & 4) != 0 ||
318 tss_limit < tss_limit_max)
319 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
321 if (old_type & 8)
322 old_tss_limit_max = 103;
323 else
324 old_tss_limit_max = 43;
326 /* read all the registers from the new TSS */
327 if (type & 8) {
328 /* 32 bit */
329 new_cr3 = ldl_kernel(tss_base + 0x1c);
330 new_eip = ldl_kernel(tss_base + 0x20);
331 new_eflags = ldl_kernel(tss_base + 0x24);
332 for(i = 0; i < 8; i++)
333 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334 for(i = 0; i < 6; i++)
335 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336 new_ldt = lduw_kernel(tss_base + 0x60);
337 new_trap = ldl_kernel(tss_base + 0x64);
338 } else {
339 /* 16 bit */
340 new_cr3 = 0;
341 new_eip = lduw_kernel(tss_base + 0x0e);
342 new_eflags = lduw_kernel(tss_base + 0x10);
343 for(i = 0; i < 8; i++)
344 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345 for(i = 0; i < 4; i++)
346 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347 new_ldt = lduw_kernel(tss_base + 0x2a);
348 new_segs[R_FS] = 0;
349 new_segs[R_GS] = 0;
350 new_trap = 0;
353 /* NOTE: we must avoid memory exceptions during the task switch,
354 so we make dummy accesses beforehand */
355 /* XXX: it can still fail in some cases, so a bigger hack is
356 necessary to validate the TLB after the accesses have been done */
358 v1 = ldub_kernel(env->tr.base);
359 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
360 stb_kernel(env->tr.base, v1);
361 stb_kernel(env->tr.base + old_tss_limit_max, v2);
363 /* clear busy bit (it is restartable) */
364 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
365 target_ulong ptr;
366 uint32_t e2;
367 ptr = env->gdt.base + (env->tr.selector & ~7);
368 e2 = ldl_kernel(ptr + 4);
369 e2 &= ~DESC_TSS_BUSY_MASK;
370 stl_kernel(ptr + 4, e2);
372 old_eflags = compute_eflags();
373 if (source == SWITCH_TSS_IRET)
374 old_eflags &= ~NT_MASK;
376 /* save the current state in the old TSS */
377 if (type & 8) {
378 /* 32 bit */
379 stl_kernel(env->tr.base + 0x20, next_eip);
380 stl_kernel(env->tr.base + 0x24, old_eflags);
381 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
382 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
383 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
384 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
385 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
386 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
387 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
388 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
389 for(i = 0; i < 6; i++)
390 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
391 } else {
392 /* 16 bit */
393 stw_kernel(env->tr.base + 0x0e, next_eip);
394 stw_kernel(env->tr.base + 0x10, old_eflags);
395 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
396 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
397 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
398 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
399 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
400 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
401 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
402 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
403 for(i = 0; i < 4; i++)
404 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
407 /* from now on, if an exception occurs, it will occur in the next task's
408 context */
410 if (source == SWITCH_TSS_CALL) {
411 stw_kernel(tss_base, env->tr.selector);
412 new_eflags |= NT_MASK;
415 /* set busy bit */
416 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
417 target_ulong ptr;
418 uint32_t e2;
419 ptr = env->gdt.base + (tss_selector & ~7);
420 e2 = ldl_kernel(ptr + 4);
421 e2 |= DESC_TSS_BUSY_MASK;
422 stl_kernel(ptr + 4, e2);
425 /* set the new CPU state */
426 /* from this point, any exception which occurs can give problems */
427 env->cr[0] |= CR0_TS_MASK;
428 env->hflags |= HF_TS_MASK;
429 env->tr.selector = tss_selector;
430 env->tr.base = tss_base;
431 env->tr.limit = tss_limit;
432 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
434 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
435 cpu_x86_update_cr3(env, new_cr3);
438 /* load all registers that cannot fault first, then reload the ones
439 that may raise an exception */
440 env->eip = new_eip;
441 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
442 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
443 if (!(type & 8))
444 eflags_mask &= 0xffff;
445 load_eflags(new_eflags, eflags_mask);
446 /* XXX: what to do in 16 bit case ? */
447 EAX = new_regs[0];
448 ECX = new_regs[1];
449 EDX = new_regs[2];
450 EBX = new_regs[3];
451 ESP = new_regs[4];
452 EBP = new_regs[5];
453 ESI = new_regs[6];
454 EDI = new_regs[7];
455 if (new_eflags & VM_MASK) {
456 for(i = 0; i < 6; i++)
457 load_seg_vm(i, new_segs[i]);
458 /* in vm86, CPL is always 3 */
459 cpu_x86_set_cpl(env, 3);
460 } else {
461 /* CPL is set to the RPL of CS */
462 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
463 /* load just the selectors first, as loading the full descriptors may trigger exceptions */
464 for(i = 0; i < 6; i++)
465 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
468 env->ldt.selector = new_ldt & ~4;
469 env->ldt.base = 0;
470 env->ldt.limit = 0;
471 env->ldt.flags = 0;
473 /* load the LDT */
474 if (new_ldt & 4)
475 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
477 if ((new_ldt & 0xfffc) != 0) {
478 dt = &env->gdt;
479 index = new_ldt & ~7;
480 if ((index + 7) > dt->limit)
481 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
482 ptr = dt->base + index;
483 e1 = ldl_kernel(ptr);
484 e2 = ldl_kernel(ptr + 4);
485 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
486 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487 if (!(e2 & DESC_P_MASK))
488 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
489 load_seg_cache_raw_dt(&env->ldt, e1, e2);
492 /* load the segments */
493 if (!(new_eflags & VM_MASK)) {
494 tss_load_seg(R_CS, new_segs[R_CS]);
495 tss_load_seg(R_SS, new_segs[R_SS]);
496 tss_load_seg(R_ES, new_segs[R_ES]);
497 tss_load_seg(R_DS, new_segs[R_DS]);
498 tss_load_seg(R_FS, new_segs[R_FS]);
499 tss_load_seg(R_GS, new_segs[R_GS]);
502 /* check that EIP is in the CS segment limits */
503 if (new_eip > env->segs[R_CS].limit) {
504 /* XXX: different exception if CALL ? */
505 raise_exception_err(EXCP0D_GPF, 0);
508 #ifndef CONFIG_USER_ONLY
509 /* reset local breakpoints */
510 if (env->dr[7] & 0x55) {
511 for (i = 0; i < 4; i++) {
512 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
513 hw_breakpoint_remove(env, i);
515 env->dr[7] &= ~0x55;
517 #endif
520 /* check if Port I/O is allowed in TSS */
521 static inline void check_io(int addr, int size)
523 int io_offset, val, mask;
525 /* TSS must be a valid 32 bit one */
526 if (!(env->tr.flags & DESC_P_MASK) ||
527 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
528 env->tr.limit < 103)
529 goto fail;
530 io_offset = lduw_kernel(env->tr.base + 0x66);
531 io_offset += (addr >> 3);
532 /* Note: the check needs two bytes */
533 if ((io_offset + 1) > env->tr.limit)
534 goto fail;
535 val = lduw_kernel(env->tr.base + io_offset);
536 val >>= (addr & 7);
537 mask = (1 << size) - 1;
538 /* all bits must be zero to allow the I/O */
539 if ((val & mask) != 0) {
540 fail:
541 raise_exception_err(EXCP0D_GPF, 0);
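/* the I/O permission bitmap checked above starts at the 16-bit offset stored
 * at byte 0x66 of the TSS and holds one bit per port; an access of "size"
 * bytes starting at "addr" is allowed only if all the corresponding bits are
 * clear, otherwise #GP(0) is raised */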
545 void helper_check_iob(uint32_t t0)
547 check_io(t0, 1);
550 void helper_check_iow(uint32_t t0)
552 check_io(t0, 2);
555 void helper_check_iol(uint32_t t0)
557 check_io(t0, 4);
560 void helper_outb(uint32_t port, uint32_t data)
562 cpu_outb(env, port, data & 0xff);
565 target_ulong helper_inb(uint32_t port)
567 return cpu_inb(env, port);
570 void helper_outw(uint32_t port, uint32_t data)
572 cpu_outw(env, port, data & 0xffff);
575 target_ulong helper_inw(uint32_t port)
577 return cpu_inw(env, port);
580 void helper_outl(uint32_t port, uint32_t data)
582 cpu_outl(env, port, data);
585 target_ulong helper_inl(uint32_t port)
587 return cpu_inl(env, port);
590 static inline unsigned int get_sp_mask(unsigned int e2)
592 if (e2 & DESC_B_MASK)
593 return 0xffffffff;
594 else
595 return 0xffff;
598 #ifdef TARGET_X86_64
599 #define SET_ESP(val, sp_mask)\
600 do {\
601 if ((sp_mask) == 0xffff)\
602 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
603 else if ((sp_mask) == 0xffffffffLL)\
604 ESP = (uint32_t)(val);\
605 else\
606 ESP = (val);\
607 } while (0)
608 #else
609 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
610 #endif
612 /* in 64-bit machines, this can overflow. So this segment addition macro
613 * can be used to trim the value to 32-bit whenever needed */
614 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
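/* The PUSHW/PUSHL/POPW/POPL macros below operate on a local copy of the
 * stack pointer; callers only commit it with SET_ESP once every access has
 * succeeded, so a fault in the middle of the sequence leaves ESP unchanged.
 * SEG_ADDL truncates the segment-base + offset sum to 32 bits, which matters
 * on 64-bit targets where target_ulong is wider than the 32-bit stack. */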
616 /* XXX: add a is_user flag to have proper security support */
617 #define PUSHW(ssp, sp, sp_mask, val)\
619 sp -= 2;\
620 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
623 #define PUSHL(ssp, sp, sp_mask, val)\
625 sp -= 4;\
626 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
629 #define POPW(ssp, sp, sp_mask, val)\
631 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
632 sp += 2;\
635 #define POPL(ssp, sp, sp_mask, val)\
637 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
638 sp += 4;\
641 /* protected mode interrupt */
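/* do_interrupt_protected() dispatches through the IDT: descriptor type 5 is
 * a task gate (handled via switch_tss), types 6/7 are 16-bit interrupt/trap
 * gates and types 14/15 their 32-bit equivalents. Bit 0 of the type
 * distinguishes trap gates from interrupt gates: only interrupt gates clear
 * IF on entry. */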
642 static void do_interrupt_protected(int intno, int is_int, int error_code,
643 unsigned int next_eip, int is_hw)
645 SegmentCache *dt;
646 target_ulong ptr, ssp;
647 int type, dpl, selector, ss_dpl, cpl;
648 int has_error_code, new_stack, shift;
649 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
650 uint32_t old_eip, sp_mask;
652 has_error_code = 0;
653 if (!is_int && !is_hw) {
654 switch(intno) {
655 case 8:
656 case 10:
657 case 11:
658 case 12:
659 case 13:
660 case 14:
661 case 17:
662 has_error_code = 1;
663 break;
666 if (is_int)
667 old_eip = next_eip;
668 else
669 old_eip = env->eip;
671 dt = &env->idt;
672 if (intno * 8 + 7 > dt->limit)
673 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
674 ptr = dt->base + intno * 8;
675 e1 = ldl_kernel(ptr);
676 e2 = ldl_kernel(ptr + 4);
677 /* check gate type */
678 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
679 switch(type) {
680 case 5: /* task gate */
681 /* must do that check here to return the correct error code */
682 if (!(e2 & DESC_P_MASK))
683 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
684 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
685 if (has_error_code) {
686 int type;
687 uint32_t mask;
688 /* push the error code */
689 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
690 shift = type >> 3;
691 if (env->segs[R_SS].flags & DESC_B_MASK)
692 mask = 0xffffffff;
693 else
694 mask = 0xffff;
695 esp = (ESP - (2 << shift)) & mask;
696 ssp = env->segs[R_SS].base + esp;
697 if (shift)
698 stl_kernel(ssp, error_code);
699 else
700 stw_kernel(ssp, error_code);
701 SET_ESP(esp, mask);
703 return;
704 case 6: /* 286 interrupt gate */
705 case 7: /* 286 trap gate */
706 case 14: /* 386 interrupt gate */
707 case 15: /* 386 trap gate */
708 break;
709 default:
710 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
711 break;
713 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
714 cpl = env->hflags & HF_CPL_MASK;
715 /* check privilege if software int */
716 if (is_int && dpl < cpl)
717 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
718 /* check valid bit */
719 if (!(e2 & DESC_P_MASK))
720 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
721 selector = e1 >> 16;
722 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
723 if ((selector & 0xfffc) == 0)
724 raise_exception_err(EXCP0D_GPF, 0);
726 if (load_segment(&e1, &e2, selector) != 0)
727 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
728 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
729 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
731 if (dpl > cpl)
732 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
733 if (!(e2 & DESC_P_MASK))
734 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
735 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
736 /* to inner privilege */
737 get_ss_esp_from_tss(&ss, &esp, dpl);
738 if ((ss & 0xfffc) == 0)
739 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
740 if ((ss & 3) != dpl)
741 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
742 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
743 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
744 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
745 if (ss_dpl != dpl)
746 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
747 if (!(ss_e2 & DESC_S_MASK) ||
748 (ss_e2 & DESC_CS_MASK) ||
749 !(ss_e2 & DESC_W_MASK))
750 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751 if (!(ss_e2 & DESC_P_MASK))
752 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
753 new_stack = 1;
754 sp_mask = get_sp_mask(ss_e2);
755 ssp = get_seg_base(ss_e1, ss_e2);
756 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
757 /* to same privilege */
758 if (env->eflags & VM_MASK)
759 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
760 new_stack = 0;
761 sp_mask = get_sp_mask(env->segs[R_SS].flags);
762 ssp = env->segs[R_SS].base;
763 esp = ESP;
764 dpl = cpl;
765 } else {
766 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
767 new_stack = 0; /* avoid warning */
768 sp_mask = 0; /* avoid warning */
769 ssp = 0; /* avoid warning */
770 esp = 0; /* avoid warning */
773 shift = type >> 3;
775 #if 0
776 /* XXX: check that enough room is available */
777 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
778 if (env->eflags & VM_MASK)
779 push_size += 8;
780 push_size <<= shift;
781 #endif
782 if (shift == 1) {
783 if (new_stack) {
784 if (env->eflags & VM_MASK) {
785 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
786 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
787 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
788 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
790 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
791 PUSHL(ssp, esp, sp_mask, ESP);
793 PUSHL(ssp, esp, sp_mask, compute_eflags());
794 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
795 PUSHL(ssp, esp, sp_mask, old_eip);
796 if (has_error_code) {
797 PUSHL(ssp, esp, sp_mask, error_code);
799 } else {
800 if (new_stack) {
801 if (env->eflags & VM_MASK) {
802 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
803 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
804 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
805 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
807 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
808 PUSHW(ssp, esp, sp_mask, ESP);
810 PUSHW(ssp, esp, sp_mask, compute_eflags());
811 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
812 PUSHW(ssp, esp, sp_mask, old_eip);
813 if (has_error_code) {
814 PUSHW(ssp, esp, sp_mask, error_code);
818 if (new_stack) {
819 if (env->eflags & VM_MASK) {
820 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
821 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
822 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
823 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
825 ss = (ss & ~3) | dpl;
826 cpu_x86_load_seg_cache(env, R_SS, ss,
827 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
829 SET_ESP(esp, sp_mask);
831 selector = (selector & ~3) | dpl;
832 cpu_x86_load_seg_cache(env, R_CS, selector,
833 get_seg_base(e1, e2),
834 get_seg_limit(e1, e2),
835 e2);
836 cpu_x86_set_cpl(env, dpl);
837 env->eip = offset;
839 /* interrupt gates clear the IF flag */
840 if ((type & 1) == 0) {
841 env->eflags &= ~IF_MASK;
843 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
846 #ifdef TARGET_X86_64
848 #define PUSHQ(sp, val)\
850 sp -= 8;\
851 stq_kernel(sp, (val));\
854 #define POPQ(sp, val)\
856 val = ldq_kernel(sp);\
857 sp += 8;\
860 static inline target_ulong get_rsp_from_tss(int level)
862 int index;
864 #if 0
865 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
866 env->tr.base, env->tr.limit);
867 #endif
869 if (!(env->tr.flags & DESC_P_MASK))
870 cpu_abort(env, "invalid tss");
871 index = 8 * level + 4;
872 if ((index + 7) > env->tr.limit)
873 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
874 return ldq_kernel(env->tr.base + index);
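/* in the 64-bit TSS, RSP0-RSP2 live at offsets 4/12/20 and IST1-IST7 at
 * offsets 36..84, so "8 * level + 4" covers both when IST entries are
 * addressed as levels 4..10 (ist + 3 in the callers) */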
877 /* 64 bit interrupt */
878 static void do_interrupt64(int intno, int is_int, int error_code,
879 target_ulong next_eip, int is_hw)
881 SegmentCache *dt;
882 target_ulong ptr;
883 int type, dpl, selector, cpl, ist;
884 int has_error_code, new_stack;
885 uint32_t e1, e2, e3, ss;
886 target_ulong old_eip, esp, offset;
888 has_error_code = 0;
889 if (!is_int && !is_hw) {
890 switch(intno) {
891 case 8:
892 case 10:
893 case 11:
894 case 12:
895 case 13:
896 case 14:
897 case 17:
898 has_error_code = 1;
899 break;
902 if (is_int)
903 old_eip = next_eip;
904 else
905 old_eip = env->eip;
907 dt = &env->idt;
908 if (intno * 16 + 15 > dt->limit)
909 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910 ptr = dt->base + intno * 16;
911 e1 = ldl_kernel(ptr);
912 e2 = ldl_kernel(ptr + 4);
913 e3 = ldl_kernel(ptr + 8);
914 /* check gate type */
915 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
916 switch(type) {
917 case 14: /* 386 interrupt gate */
918 case 15: /* 386 trap gate */
919 break;
920 default:
921 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
922 break;
924 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
925 cpl = env->hflags & HF_CPL_MASK;
926 /* check privilege if software int */
927 if (is_int && dpl < cpl)
928 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
929 /* check valid bit */
930 if (!(e2 & DESC_P_MASK))
931 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
932 selector = e1 >> 16;
933 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
934 ist = e2 & 7;
935 if ((selector & 0xfffc) == 0)
936 raise_exception_err(EXCP0D_GPF, 0);
938 if (load_segment(&e1, &e2, selector) != 0)
939 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
941 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
942 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
943 if (dpl > cpl)
944 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
945 if (!(e2 & DESC_P_MASK))
946 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
947 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
948 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
949 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
950 /* to inner privilege */
951 if (ist != 0)
952 esp = get_rsp_from_tss(ist + 3);
953 else
954 esp = get_rsp_from_tss(dpl);
955 esp &= ~0xfLL; /* align stack */
956 ss = 0;
957 new_stack = 1;
958 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
959 /* to same privilege */
960 if (env->eflags & VM_MASK)
961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
962 new_stack = 0;
963 if (ist != 0)
964 esp = get_rsp_from_tss(ist + 3);
965 else
966 esp = ESP;
967 esp &= ~0xfLL; /* align stack */
968 dpl = cpl;
969 } else {
970 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
971 new_stack = 0; /* avoid warning */
972 esp = 0; /* avoid warning */
975 PUSHQ(esp, env->segs[R_SS].selector);
976 PUSHQ(esp, ESP);
977 PUSHQ(esp, compute_eflags());
978 PUSHQ(esp, env->segs[R_CS].selector);
979 PUSHQ(esp, old_eip);
980 if (has_error_code) {
981 PUSHQ(esp, error_code);
984 if (new_stack) {
985 ss = 0 | dpl;
986 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
988 ESP = esp;
990 selector = (selector & ~3) | dpl;
991 cpu_x86_load_seg_cache(env, R_CS, selector,
992 get_seg_base(e1, e2),
993 get_seg_limit(e1, e2),
994 e2);
995 cpu_x86_set_cpl(env, dpl);
996 env->eip = offset;
998 /* interrupt gates clear the IF flag */
999 if ((type & 1) == 0) {
1000 env->eflags &= ~IF_MASK;
1002 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1004 #endif
1006 #ifdef TARGET_X86_64
1007 #if defined(CONFIG_USER_ONLY)
1008 void helper_syscall(int next_eip_addend)
1010 env->exception_index = EXCP_SYSCALL;
1011 env->exception_next_eip = env->eip + next_eip_addend;
1012 cpu_loop_exit();
1014 #else
1015 void helper_syscall(int next_eip_addend)
1017 int selector;
1019 if (!(env->efer & MSR_EFER_SCE)) {
1020 raise_exception_err(EXCP06_ILLOP, 0);
1022 selector = (env->star >> 32) & 0xffff;
1023 if (env->hflags & HF_LMA_MASK) {
1024 int code64;
1026 ECX = env->eip + next_eip_addend;
1027 env->regs[11] = compute_eflags();
1029 code64 = env->hflags & HF_CS64_MASK;
1031 cpu_x86_set_cpl(env, 0);
1032 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1033 0, 0xffffffff,
1034 DESC_G_MASK | DESC_P_MASK |
1035 DESC_S_MASK |
1036 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1037 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1038 0, 0xffffffff,
1039 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1040 DESC_S_MASK |
1041 DESC_W_MASK | DESC_A_MASK);
1042 env->eflags &= ~env->fmask;
1043 load_eflags(env->eflags, 0);
1044 if (code64)
1045 env->eip = env->lstar;
1046 else
1047 env->eip = env->cstar;
1048 } else {
1049 ECX = (uint32_t)(env->eip + next_eip_addend);
1051 cpu_x86_set_cpl(env, 0);
1052 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1053 0, 0xffffffff,
1054 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1055 DESC_S_MASK |
1056 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1057 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1058 0, 0xffffffff,
1059 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1060 DESC_S_MASK |
1061 DESC_W_MASK | DESC_A_MASK);
1062 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1063 env->eip = (uint32_t)env->star;
1066 #endif
1067 #endif
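/* SYSCALL (above): the new CS selector comes from MSR_STAR[47:32], with SS
 * implicitly CS + 8; in long mode the return RIP is saved in RCX and RFLAGS
 * in R11, the flags selected by MSR_SFMASK are cleared, and execution
 * continues at MSR_LSTAR (64-bit caller) or MSR_CSTAR (compatibility mode).
 * In legacy mode the return EIP goes to ECX and the target is the low 32
 * bits of MSR_STAR. */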
1069 #ifdef TARGET_X86_64
1070 void helper_sysret(int dflag)
1072 int cpl, selector;
1074 if (!(env->efer & MSR_EFER_SCE)) {
1075 raise_exception_err(EXCP06_ILLOP, 0);
1077 cpl = env->hflags & HF_CPL_MASK;
1078 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1079 raise_exception_err(EXCP0D_GPF, 0);
1081 selector = (env->star >> 48) & 0xffff;
1082 if (env->hflags & HF_LMA_MASK) {
1083 if (dflag == 2) {
1084 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1085 0, 0xffffffff,
1086 DESC_G_MASK | DESC_P_MASK |
1087 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1089 DESC_L_MASK);
1090 env->eip = ECX;
1091 } else {
1092 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1093 0, 0xffffffff,
1094 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097 env->eip = (uint32_t)ECX;
1099 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1100 0, 0xffffffff,
1101 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1102 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1103 DESC_W_MASK | DESC_A_MASK);
1104 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1105 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1106 cpu_x86_set_cpl(env, 3);
1107 } else {
1108 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1109 0, 0xffffffff,
1110 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1111 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1112 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1113 env->eip = (uint32_t)ECX;
1114 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1115 0, 0xffffffff,
1116 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1117 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1118 DESC_W_MASK | DESC_A_MASK);
1119 env->eflags |= IF_MASK;
1120 cpu_x86_set_cpl(env, 3);
1122 #ifdef USE_KQEMU
1123 if (kqemu_is_ok(env)) {
1124 if (env->hflags & HF_LMA_MASK)
1125 CC_OP = CC_OP_EFLAGS;
1126 env->exception_index = -1;
1127 cpu_loop_exit();
1129 #endif
1131 #endif
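/* SYSRET (above): the return CS selector is taken from MSR_STAR[63:48]
 * (+16 for a 64-bit return), SS is that selector + 8, and CPL is forced to
 * 3; in long mode RFLAGS is restored from R11, in legacy mode only IF is
 * set */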
1133 /* real mode interrupt */
1134 static void do_interrupt_real(int intno, int is_int, int error_code,
1135 unsigned int next_eip)
1137 SegmentCache *dt;
1138 target_ulong ptr, ssp;
1139 int selector;
1140 uint32_t offset, esp;
1141 uint32_t old_cs, old_eip;
1143 /* real mode (simpler !) */
1144 dt = &env->idt;
1145 if (intno * 4 + 3 > dt->limit)
1146 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1147 ptr = dt->base + intno * 4;
1148 offset = lduw_kernel(ptr);
1149 selector = lduw_kernel(ptr + 2);
1150 esp = ESP;
1151 ssp = env->segs[R_SS].base;
1152 if (is_int)
1153 old_eip = next_eip;
1154 else
1155 old_eip = env->eip;
1156 old_cs = env->segs[R_CS].selector;
1157 /* XXX: use SS segment size ? */
1158 PUSHW(ssp, esp, 0xffff, compute_eflags());
1159 PUSHW(ssp, esp, 0xffff, old_cs);
1160 PUSHW(ssp, esp, 0xffff, old_eip);
1162 /* update processor state */
1163 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1164 env->eip = offset;
1165 env->segs[R_CS].selector = selector;
1166 env->segs[R_CS].base = (selector << 4);
1167 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1170 /* fake user mode interrupt */
1171 void do_interrupt_user(int intno, int is_int, int error_code,
1172 target_ulong next_eip)
1174 SegmentCache *dt;
1175 target_ulong ptr;
1176 int dpl, cpl, shift;
1177 uint32_t e2;
1179 dt = &env->idt;
1180 if (env->hflags & HF_LMA_MASK) {
1181 shift = 4;
1182 } else {
1183 shift = 3;
1185 ptr = dt->base + (intno << shift);
1186 e2 = ldl_kernel(ptr + 4);
1188 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1189 cpl = env->hflags & HF_CPL_MASK;
1190 /* check privilege if software int */
1191 if (is_int && dpl < cpl)
1192 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1194 /* Since we emulate only user space, we cannot do more than
1195 exiting the emulation with the suitable exception and error
1196 code */
1197 if (is_int)
1198 EIP = next_eip;
1201 /*
1202 * Begin execution of an interrupt. is_int is TRUE if coming from
1203 * the int instruction. next_eip is the EIP value AFTER the interrupt
1204 * instruction. It is only relevant if is_int is TRUE.
1205 */
1206 void do_interrupt(int intno, int is_int, int error_code,
1207 target_ulong next_eip, int is_hw)
1209 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1210 if ((env->cr[0] & CR0_PE_MASK)) {
1211 static int count;
1212 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1213 count, intno, error_code, is_int,
1214 env->hflags & HF_CPL_MASK,
1215 env->segs[R_CS].selector, EIP,
1216 (int)env->segs[R_CS].base + EIP,
1217 env->segs[R_SS].selector, ESP);
1218 if (intno == 0x0e) {
1219 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1220 } else {
1221 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1223 qemu_log("\n");
1224 log_cpu_state(env, X86_DUMP_CCOP);
1225 #if 0
1227 int i;
1228 uint8_t *ptr;
1229 qemu_log(" code=");
1230 ptr = env->segs[R_CS].base + env->eip;
1231 for(i = 0; i < 16; i++) {
1232 qemu_log(" %02x", ldub(ptr + i));
1234 qemu_log("\n");
1236 #endif
1237 count++;
1240 if (env->cr[0] & CR0_PE_MASK) {
1241 #ifdef TARGET_X86_64
1242 if (env->hflags & HF_LMA_MASK) {
1243 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1244 } else
1245 #endif
1247 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1249 } else {
1250 do_interrupt_real(intno, is_int, error_code, next_eip);
1254 /*
1255 * Check nested exceptions and change to double or triple fault if
1256 * needed. It should only be called if this is not an interrupt.
1257 * Returns the new exception number.
1258 */
1259 static int check_exception(int intno, int *error_code)
1261 int first_contributory = env->old_exception == 0 ||
1262 (env->old_exception >= 10 &&
1263 env->old_exception <= 13);
1264 int second_contributory = intno == 0 ||
1265 (intno >= 10 && intno <= 13);
1267 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1268 env->old_exception, intno);
1270 if (env->old_exception == EXCP08_DBLE)
1271 cpu_abort(env, "triple fault");
1273 if ((first_contributory && second_contributory)
1274 || (env->old_exception == EXCP0E_PAGE &&
1275 (second_contributory || (intno == EXCP0E_PAGE)))) {
1276 intno = EXCP08_DBLE;
1277 *error_code = 0;
1280 if (second_contributory || (intno == EXCP0E_PAGE) ||
1281 (intno == EXCP08_DBLE))
1282 env->old_exception = intno;
1284 return intno;
1287 /*
1288 * Signal an interrupt. It is executed in the main CPU loop.
1289 * is_int is TRUE if coming from the int instruction. next_eip is the
1290 * EIP value AFTER the interrupt instruction. It is only relevant if
1291 * is_int is TRUE.
1292 */
1293 static void noreturn raise_interrupt(int intno, int is_int, int error_code,
1294 int next_eip_addend)
1296 if (!is_int) {
1297 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1298 intno = check_exception(intno, &error_code);
1299 } else {
1300 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1303 env->exception_index = intno;
1304 env->error_code = error_code;
1305 env->exception_is_int = is_int;
1306 env->exception_next_eip = env->eip + next_eip_addend;
1307 cpu_loop_exit();
1310 /* shortcuts to generate exceptions */
1312 void raise_exception_err(int exception_index, int error_code)
1314 raise_interrupt(exception_index, 0, error_code, 0);
1317 void raise_exception(int exception_index)
1319 raise_interrupt(exception_index, 0, 0, 0);
1322 /* SMM support */
1324 #if defined(CONFIG_USER_ONLY)
1326 void do_smm_enter(void)
1330 void helper_rsm(void)
1334 #else
1336 #ifdef TARGET_X86_64
1337 #define SMM_REVISION_ID 0x00020064
1338 #else
1339 #define SMM_REVISION_ID 0x00020000
1340 #endif
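/* SMM state save area: on SMI entry and RSM the CPU state is exchanged with
 * a save map located at smbase + 0x8000. The offsets used below follow the
 * AMD64 layout when TARGET_X86_64 is defined and the legacy 32-bit layout
 * otherwise; the revision id at offset 0x7efc advertises SMBASE relocation
 * support (bit 17). */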
1342 void do_smm_enter(void)
1344 target_ulong sm_state;
1345 SegmentCache *dt;
1346 int i, offset;
1348 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1349 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1351 env->hflags |= HF_SMM_MASK;
1352 cpu_smm_update(env);
1354 sm_state = env->smbase + 0x8000;
1356 #ifdef TARGET_X86_64
1357 for(i = 0; i < 6; i++) {
1358 dt = &env->segs[i];
1359 offset = 0x7e00 + i * 16;
1360 stw_phys(sm_state + offset, dt->selector);
1361 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1362 stl_phys(sm_state + offset + 4, dt->limit);
1363 stq_phys(sm_state + offset + 8, dt->base);
1366 stq_phys(sm_state + 0x7e68, env->gdt.base);
1367 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1369 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1370 stq_phys(sm_state + 0x7e78, env->ldt.base);
1371 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1372 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1374 stq_phys(sm_state + 0x7e88, env->idt.base);
1375 stl_phys(sm_state + 0x7e84, env->idt.limit);
1377 stw_phys(sm_state + 0x7e90, env->tr.selector);
1378 stq_phys(sm_state + 0x7e98, env->tr.base);
1379 stl_phys(sm_state + 0x7e94, env->tr.limit);
1380 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1382 stq_phys(sm_state + 0x7ed0, env->efer);
1384 stq_phys(sm_state + 0x7ff8, EAX);
1385 stq_phys(sm_state + 0x7ff0, ECX);
1386 stq_phys(sm_state + 0x7fe8, EDX);
1387 stq_phys(sm_state + 0x7fe0, EBX);
1388 stq_phys(sm_state + 0x7fd8, ESP);
1389 stq_phys(sm_state + 0x7fd0, EBP);
1390 stq_phys(sm_state + 0x7fc8, ESI);
1391 stq_phys(sm_state + 0x7fc0, EDI);
1392 for(i = 8; i < 16; i++)
1393 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1394 stq_phys(sm_state + 0x7f78, env->eip);
1395 stl_phys(sm_state + 0x7f70, compute_eflags());
1396 stl_phys(sm_state + 0x7f68, env->dr[6]);
1397 stl_phys(sm_state + 0x7f60, env->dr[7]);
1399 stl_phys(sm_state + 0x7f48, env->cr[4]);
1400 stl_phys(sm_state + 0x7f50, env->cr[3]);
1401 stl_phys(sm_state + 0x7f58, env->cr[0]);
1403 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1404 stl_phys(sm_state + 0x7f00, env->smbase);
1405 #else
1406 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1407 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1408 stl_phys(sm_state + 0x7ff4, compute_eflags());
1409 stl_phys(sm_state + 0x7ff0, env->eip);
1410 stl_phys(sm_state + 0x7fec, EDI);
1411 stl_phys(sm_state + 0x7fe8, ESI);
1412 stl_phys(sm_state + 0x7fe4, EBP);
1413 stl_phys(sm_state + 0x7fe0, ESP);
1414 stl_phys(sm_state + 0x7fdc, EBX);
1415 stl_phys(sm_state + 0x7fd8, EDX);
1416 stl_phys(sm_state + 0x7fd4, ECX);
1417 stl_phys(sm_state + 0x7fd0, EAX);
1418 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1419 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1421 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1422 stl_phys(sm_state + 0x7f64, env->tr.base);
1423 stl_phys(sm_state + 0x7f60, env->tr.limit);
1424 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1426 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1427 stl_phys(sm_state + 0x7f80, env->ldt.base);
1428 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1429 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1431 stl_phys(sm_state + 0x7f74, env->gdt.base);
1432 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1434 stl_phys(sm_state + 0x7f58, env->idt.base);
1435 stl_phys(sm_state + 0x7f54, env->idt.limit);
1437 for(i = 0; i < 6; i++) {
1438 dt = &env->segs[i];
1439 if (i < 3)
1440 offset = 0x7f84 + i * 12;
1441 else
1442 offset = 0x7f2c + (i - 3) * 12;
1443 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1444 stl_phys(sm_state + offset + 8, dt->base);
1445 stl_phys(sm_state + offset + 4, dt->limit);
1446 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1448 stl_phys(sm_state + 0x7f14, env->cr[4]);
1450 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1451 stl_phys(sm_state + 0x7ef8, env->smbase);
1452 #endif
1453 /* init SMM cpu state */
1455 #ifdef TARGET_X86_64
1456 cpu_load_efer(env, 0);
1457 #endif
1458 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1459 env->eip = 0x00008000;
1460 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1461 0xffffffff, 0);
1462 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1463 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1464 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1465 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1466 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1468 cpu_x86_update_cr0(env,
1469 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1470 cpu_x86_update_cr4(env, 0);
1471 env->dr[7] = 0x00000400;
1472 CC_OP = CC_OP_EFLAGS;
1475 void helper_rsm(void)
1477 target_ulong sm_state;
1478 int i, offset;
1479 uint32_t val;
1481 sm_state = env->smbase + 0x8000;
1482 #ifdef TARGET_X86_64
1483 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1485 for(i = 0; i < 6; i++) {
1486 offset = 0x7e00 + i * 16;
1487 cpu_x86_load_seg_cache(env, i,
1488 lduw_phys(sm_state + offset),
1489 ldq_phys(sm_state + offset + 8),
1490 ldl_phys(sm_state + offset + 4),
1491 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1494 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1495 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1497 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1498 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1499 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1500 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1502 env->idt.base = ldq_phys(sm_state + 0x7e88);
1503 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1505 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1506 env->tr.base = ldq_phys(sm_state + 0x7e98);
1507 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1508 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1510 EAX = ldq_phys(sm_state + 0x7ff8);
1511 ECX = ldq_phys(sm_state + 0x7ff0);
1512 EDX = ldq_phys(sm_state + 0x7fe8);
1513 EBX = ldq_phys(sm_state + 0x7fe0);
1514 ESP = ldq_phys(sm_state + 0x7fd8);
1515 EBP = ldq_phys(sm_state + 0x7fd0);
1516 ESI = ldq_phys(sm_state + 0x7fc8);
1517 EDI = ldq_phys(sm_state + 0x7fc0);
1518 for(i = 8; i < 16; i++)
1519 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1520 env->eip = ldq_phys(sm_state + 0x7f78);
1521 load_eflags(ldl_phys(sm_state + 0x7f70),
1522 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1523 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1524 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1526 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1527 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1528 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1530 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1531 if (val & 0x20000) {
1532 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1534 #else
1535 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1536 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1537 load_eflags(ldl_phys(sm_state + 0x7ff4),
1538 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1539 env->eip = ldl_phys(sm_state + 0x7ff0);
1540 EDI = ldl_phys(sm_state + 0x7fec);
1541 ESI = ldl_phys(sm_state + 0x7fe8);
1542 EBP = ldl_phys(sm_state + 0x7fe4);
1543 ESP = ldl_phys(sm_state + 0x7fe0);
1544 EBX = ldl_phys(sm_state + 0x7fdc);
1545 EDX = ldl_phys(sm_state + 0x7fd8);
1546 ECX = ldl_phys(sm_state + 0x7fd4);
1547 EAX = ldl_phys(sm_state + 0x7fd0);
1548 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1549 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1551 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1552 env->tr.base = ldl_phys(sm_state + 0x7f64);
1553 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1554 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1556 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1557 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1558 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1559 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1561 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1562 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1564 env->idt.base = ldl_phys(sm_state + 0x7f58);
1565 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1567 for(i = 0; i < 6; i++) {
1568 if (i < 3)
1569 offset = 0x7f84 + i * 12;
1570 else
1571 offset = 0x7f2c + (i - 3) * 12;
1572 cpu_x86_load_seg_cache(env, i,
1573 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1574 ldl_phys(sm_state + offset + 8),
1575 ldl_phys(sm_state + offset + 4),
1576 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1578 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1580 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1581 if (val & 0x20000) {
1582 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1584 #endif
1585 CC_OP = CC_OP_EFLAGS;
1586 env->hflags &= ~HF_SMM_MASK;
1587 cpu_smm_update(env);
1589 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1590 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1593 #endif /* !CONFIG_USER_ONLY */
1596 /* division, flags are undefined */
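/* DIV/IDIV helpers: the dividend is the concatenation of (E)DX:(E)AX (or
 * AH:AL for the byte forms); both a zero divisor and a quotient that does
 * not fit in the destination raise #DE, which is what EXCP00_DIVZ maps to
 * here */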
1598 void helper_divb_AL(target_ulong t0)
1600 unsigned int num, den, q, r;
1602 num = (EAX & 0xffff);
1603 den = (t0 & 0xff);
1604 if (den == 0) {
1605 raise_exception(EXCP00_DIVZ);
1607 q = (num / den);
1608 if (q > 0xff)
1609 raise_exception(EXCP00_DIVZ);
1610 q &= 0xff;
1611 r = (num % den) & 0xff;
1612 EAX = (EAX & ~0xffff) | (r << 8) | q;
1615 void helper_idivb_AL(target_ulong t0)
1617 int num, den, q, r;
1619 num = (int16_t)EAX;
1620 den = (int8_t)t0;
1621 if (den == 0) {
1622 raise_exception(EXCP00_DIVZ);
1624 q = (num / den);
1625 if (q != (int8_t)q)
1626 raise_exception(EXCP00_DIVZ);
1627 q &= 0xff;
1628 r = (num % den) & 0xff;
1629 EAX = (EAX & ~0xffff) | (r << 8) | q;
1632 void helper_divw_AX(target_ulong t0)
1634 unsigned int num, den, q, r;
1636 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1637 den = (t0 & 0xffff);
1638 if (den == 0) {
1639 raise_exception(EXCP00_DIVZ);
1641 q = (num / den);
1642 if (q > 0xffff)
1643 raise_exception(EXCP00_DIVZ);
1644 q &= 0xffff;
1645 r = (num % den) & 0xffff;
1646 EAX = (EAX & ~0xffff) | q;
1647 EDX = (EDX & ~0xffff) | r;
1650 void helper_idivw_AX(target_ulong t0)
1652 int num, den, q, r;
1654 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1655 den = (int16_t)t0;
1656 if (den == 0) {
1657 raise_exception(EXCP00_DIVZ);
1659 q = (num / den);
1660 if (q != (int16_t)q)
1661 raise_exception(EXCP00_DIVZ);
1662 q &= 0xffff;
1663 r = (num % den) & 0xffff;
1664 EAX = (EAX & ~0xffff) | q;
1665 EDX = (EDX & ~0xffff) | r;
1668 void helper_divl_EAX(target_ulong t0)
1670 unsigned int den, r;
1671 uint64_t num, q;
1673 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1674 den = t0;
1675 if (den == 0) {
1676 raise_exception(EXCP00_DIVZ);
1678 q = (num / den);
1679 r = (num % den);
1680 if (q > 0xffffffff)
1681 raise_exception(EXCP00_DIVZ);
1682 EAX = (uint32_t)q;
1683 EDX = (uint32_t)r;
1686 void helper_idivl_EAX(target_ulong t0)
1688 int den, r;
1689 int64_t num, q;
1691 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1692 den = t0;
1693 if (den == 0) {
1694 raise_exception(EXCP00_DIVZ);
1696 q = (num / den);
1697 r = (num % den);
1698 if (q != (int32_t)q)
1699 raise_exception(EXCP00_DIVZ);
1700 EAX = (uint32_t)q;
1701 EDX = (uint32_t)r;
1704 /* bcd */
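/* ASCII/BCD adjust helpers (AAM, AAD, AAA, AAS, DAA, DAS): they operate on
 * AL/AH; DAA/DAS/AAA/AAS compute the resulting flags by hand into CC_SRC,
 * while AAM/AAD just leave the result in CC_DST for the normal lazy flag
 * evaluation */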
1706 /* XXX: exception */
1707 void helper_aam(int base)
1709 int al, ah;
1710 al = EAX & 0xff;
1711 ah = al / base;
1712 al = al % base;
1713 EAX = (EAX & ~0xffff) | al | (ah << 8);
1714 CC_DST = al;
1717 void helper_aad(int base)
1719 int al, ah;
1720 al = EAX & 0xff;
1721 ah = (EAX >> 8) & 0xff;
1722 al = ((ah * base) + al) & 0xff;
1723 EAX = (EAX & ~0xffff) | al;
1724 CC_DST = al;
1727 void helper_aaa(void)
1729 int icarry;
1730 int al, ah, af;
1731 int eflags;
1733 eflags = helper_cc_compute_all(CC_OP);
1734 af = eflags & CC_A;
1735 al = EAX & 0xff;
1736 ah = (EAX >> 8) & 0xff;
1738 icarry = (al > 0xf9);
1739 if (((al & 0x0f) > 9 ) || af) {
1740 al = (al + 6) & 0x0f;
1741 ah = (ah + 1 + icarry) & 0xff;
1742 eflags |= CC_C | CC_A;
1743 } else {
1744 eflags &= ~(CC_C | CC_A);
1745 al &= 0x0f;
1747 EAX = (EAX & ~0xffff) | al | (ah << 8);
1748 CC_SRC = eflags;
1751 void helper_aas(void)
1753 int icarry;
1754 int al, ah, af;
1755 int eflags;
1757 eflags = helper_cc_compute_all(CC_OP);
1758 af = eflags & CC_A;
1759 al = EAX & 0xff;
1760 ah = (EAX >> 8) & 0xff;
1762 icarry = (al < 6);
1763 if (((al & 0x0f) > 9 ) || af) {
1764 al = (al - 6) & 0x0f;
1765 ah = (ah - 1 - icarry) & 0xff;
1766 eflags |= CC_C | CC_A;
1767 } else {
1768 eflags &= ~(CC_C | CC_A);
1769 al &= 0x0f;
1771 EAX = (EAX & ~0xffff) | al | (ah << 8);
1772 CC_SRC = eflags;
1775 void helper_daa(void)
1777 int al, af, cf;
1778 int eflags;
1780 eflags = helper_cc_compute_all(CC_OP);
1781 cf = eflags & CC_C;
1782 af = eflags & CC_A;
1783 al = EAX & 0xff;
1785 eflags = 0;
1786 if (((al & 0x0f) > 9 ) || af) {
1787 al = (al + 6) & 0xff;
1788 eflags |= CC_A;
1790 if ((al > 0x9f) || cf) {
1791 al = (al + 0x60) & 0xff;
1792 eflags |= CC_C;
1794 EAX = (EAX & ~0xff) | al;
1795 /* well, speed is not an issue here, so we compute the flags by hand */
1796 eflags |= (al == 0) << 6; /* zf */
1797 eflags |= parity_table[al]; /* pf */
1798 eflags |= (al & 0x80); /* sf */
1799 CC_SRC = eflags;
1802 void helper_das(void)
1804 int al, al1, af, cf;
1805 int eflags;
1807 eflags = helper_cc_compute_all(CC_OP);
1808 cf = eflags & CC_C;
1809 af = eflags & CC_A;
1810 al = EAX & 0xff;
1812 eflags = 0;
1813 al1 = al;
1814 if (((al & 0x0f) > 9 ) || af) {
1815 eflags |= CC_A;
1816 if (al < 6 || cf)
1817 eflags |= CC_C;
1818 al = (al - 6) & 0xff;
1820 if ((al1 > 0x99) || cf) {
1821 al = (al - 0x60) & 0xff;
1822 eflags |= CC_C;
1824 EAX = (EAX & ~0xff) | al;
1825 /* well, speed is not an issue here, so we compute the flags by hand */
1826 eflags |= (al == 0) << 6; /* zf */
1827 eflags |= parity_table[al]; /* pf */
1828 eflags |= (al & 0x80); /* sf */
1829 CC_SRC = eflags;
1832 void helper_into(int next_eip_addend)
1834 int eflags;
1835 eflags = helper_cc_compute_all(CC_OP);
1836 if (eflags & CC_O) {
1837 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1841 void helper_cmpxchg8b(target_ulong a0)
1843 uint64_t d;
1844 int eflags;
1846 eflags = helper_cc_compute_all(CC_OP);
1847 d = ldq(a0);
1848 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1849 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1850 eflags |= CC_Z;
1851 } else {
1852 /* always do the store */
1853 stq(a0, d);
1854 EDX = (uint32_t)(d >> 32);
1855 EAX = (uint32_t)d;
1856 eflags &= ~CC_Z;
1858 CC_SRC = eflags;
1861 #ifdef TARGET_X86_64
1862 void helper_cmpxchg16b(target_ulong a0)
1864 uint64_t d0, d1;
1865 int eflags;
1867 if ((a0 & 0xf) != 0)
1868 raise_exception(EXCP0D_GPF);
1869 eflags = helper_cc_compute_all(CC_OP);
1870 d0 = ldq(a0);
1871 d1 = ldq(a0 + 8);
1872 if (d0 == EAX && d1 == EDX) {
1873 stq(a0, EBX);
1874 stq(a0 + 8, ECX);
1875 eflags |= CC_Z;
1876 } else {
1877 /* always do the store */
1878 stq(a0, d0);
1879 stq(a0 + 8, d1);
1880 EDX = d1;
1881 EAX = d0;
1882 eflags &= ~CC_Z;
1884 CC_SRC = eflags;
1886 #endif
1888 void helper_single_step(void)
1890 #ifndef CONFIG_USER_ONLY
1891 check_hw_breakpoints(env, 1);
1892 env->dr[6] |= DR6_BS;
1893 #endif
1894 raise_exception(EXCP01_DB);
1897 void helper_cpuid(void)
1899 uint32_t eax, ebx, ecx, edx;
1901 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1903 cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
1904 EAX = eax;
1905 EBX = ebx;
1906 ECX = ecx;
1907 EDX = edx;
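/* helper_enter_level implements the nested-frame part of ENTER: for a
 * nonzero nesting level it copies "level - 1" enclosing frame pointers from
 * the old frame (via EBP) onto the new stack and then stores t1, the
 * frame-pointer value supplied by the translated ENTER code */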
1910 void helper_enter_level(int level, int data32, target_ulong t1)
1912 target_ulong ssp;
1913 uint32_t esp_mask, esp, ebp;
1915 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1916 ssp = env->segs[R_SS].base;
1917 ebp = EBP;
1918 esp = ESP;
1919 if (data32) {
1920 /* 32 bit */
1921 esp -= 4;
1922 while (--level) {
1923 esp -= 4;
1924 ebp -= 4;
1925 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1927 esp -= 4;
1928 stl(ssp + (esp & esp_mask), t1);
1929 } else {
1930 /* 16 bit */
1931 esp -= 2;
1932 while (--level) {
1933 esp -= 2;
1934 ebp -= 2;
1935 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1937 esp -= 2;
1938 stw(ssp + (esp & esp_mask), t1);
1942 #ifdef TARGET_X86_64
1943 void helper_enter64_level(int level, int data64, target_ulong t1)
1945 target_ulong esp, ebp;
1946 ebp = EBP;
1947 esp = ESP;
1949 if (data64) {
1950 /* 64 bit */
1951 esp -= 8;
1952 while (--level) {
1953 esp -= 8;
1954 ebp -= 8;
1955 stq(esp, ldq(ebp));
1957 esp -= 8;
1958 stq(esp, t1);
1959 } else {
1960 /* 16 bit */
1961 esp -= 2;
1962 while (--level) {
1963 esp -= 2;
1964 ebp -= 2;
1965 stw(esp, lduw(ebp));
1967 esp -= 2;
1968 stw(esp, t1);
1971 #endif
1973 void helper_lldt(int selector)
1975 SegmentCache *dt;
1976 uint32_t e1, e2;
1977 int index, entry_limit;
1978 target_ulong ptr;
1980 selector &= 0xffff;
1981 if ((selector & 0xfffc) == 0) {
1982 /* XXX: NULL selector case: invalid LDT */
1983 env->ldt.base = 0;
1984 env->ldt.limit = 0;
1985 } else {
1986 if (selector & 0x4)
1987 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1988 dt = &env->gdt;
1989 index = selector & ~7;
1990 #ifdef TARGET_X86_64
1991 if (env->hflags & HF_LMA_MASK)
1992 entry_limit = 15;
1993 else
1994 #endif
1995 entry_limit = 7;
1996 if ((index + entry_limit) > dt->limit)
1997 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1998 ptr = dt->base + index;
1999 e1 = ldl_kernel(ptr);
2000 e2 = ldl_kernel(ptr + 4);
2001 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2002 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2003 if (!(e2 & DESC_P_MASK))
2004 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2005 #ifdef TARGET_X86_64
2006 if (env->hflags & HF_LMA_MASK) {
2007 uint32_t e3;
2008 e3 = ldl_kernel(ptr + 8);
2009 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2010 env->ldt.base |= (target_ulong)e3 << 32;
2011 } else
2012 #endif
2014 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2017 env->ldt.selector = selector;
2020 void helper_ltr(int selector)
2022 SegmentCache *dt;
2023 uint32_t e1, e2;
2024 int index, type, entry_limit;
2025 target_ulong ptr;
2027 selector &= 0xffff;
2028 if ((selector & 0xfffc) == 0) {
2029 /* NULL selector case: invalid TR */
2030 env->tr.base = 0;
2031 env->tr.limit = 0;
2032 env->tr.flags = 0;
2033 } else {
2034 if (selector & 0x4)
2035 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2036 dt = &env->gdt;
2037 index = selector & ~7;
2038 #ifdef TARGET_X86_64
2039 if (env->hflags & HF_LMA_MASK)
2040 entry_limit = 15;
2041 else
2042 #endif
2043 entry_limit = 7;
2044 if ((index + entry_limit) > dt->limit)
2045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2046 ptr = dt->base + index;
2047 e1 = ldl_kernel(ptr);
2048 e2 = ldl_kernel(ptr + 4);
2049 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2050 if ((e2 & DESC_S_MASK) ||
2051 (type != 1 && type != 9))
2052 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2053 if (!(e2 & DESC_P_MASK))
2054 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2055 #ifdef TARGET_X86_64
2056 if (env->hflags & HF_LMA_MASK) {
2057 uint32_t e3, e4;
2058 e3 = ldl_kernel(ptr + 8);
2059 e4 = ldl_kernel(ptr + 12);
2060 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2061 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2062 load_seg_cache_raw_dt(&env->tr, e1, e2);
2063 env->tr.base |= (target_ulong)e3 << 32;
2064 } else
2065 #endif
2067 load_seg_cache_raw_dt(&env->tr, e1, e2);
2069 e2 |= DESC_TSS_BUSY_MASK;
2070 stl_kernel(ptr + 4, e2);
2072 env->tr.selector = selector;
2075 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2076 void helper_load_seg(int seg_reg, int selector)
2078 uint32_t e1, e2;
2079 int cpl, dpl, rpl;
2080 SegmentCache *dt;
2081 int index;
2082 target_ulong ptr;
2084 selector &= 0xffff;
2085 cpl = env->hflags & HF_CPL_MASK;
2086 if ((selector & 0xfffc) == 0) {
2087 /* null selector case */
2088 if (seg_reg == R_SS
2089 #ifdef TARGET_X86_64
2090 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2091 #endif
2093 raise_exception_err(EXCP0D_GPF, 0);
2094 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2095 } else {
2097 if (selector & 0x4)
2098 dt = &env->ldt;
2099 else
2100 dt = &env->gdt;
2101 index = selector & ~7;
2102 if ((index + 7) > dt->limit)
2103 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104 ptr = dt->base + index;
2105 e1 = ldl_kernel(ptr);
2106 e2 = ldl_kernel(ptr + 4);
2108 if (!(e2 & DESC_S_MASK))
2109 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2110 rpl = selector & 3;
2111 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2112 if (seg_reg == R_SS) {
2113 /* must be writable segment */
2114 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2115 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2116 if (rpl != cpl || dpl != cpl)
2117 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2118 } else {
2119 /* must be readable segment */
2120 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2121 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2123 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2124 /* if not conforming code, test rights */
2125 if (dpl < cpl || dpl < rpl)
2126 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 if (!(e2 & DESC_P_MASK)) {
2131 if (seg_reg == R_SS)
2132 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2133 else
2134 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2137 /* set the access bit if not already set */
2138 if (!(e2 & DESC_A_MASK)) {
2139 e2 |= DESC_A_MASK;
2140 stl_kernel(ptr + 4, e2);
2143 cpu_x86_load_seg_cache(env, seg_reg, selector,
2144 get_seg_base(e1, e2),
2145 get_seg_limit(e1, e2),
2146 e2);
2147 #if 0
2148 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2149 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2150 #endif
2154 /* protected mode jump */
2155 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2156 int next_eip_addend)
2158 int gate_cs, type;
2159 uint32_t e1, e2, cpl, dpl, rpl, limit;
2160 target_ulong next_eip;
2162 if ((new_cs & 0xfffc) == 0)
2163 raise_exception_err(EXCP0D_GPF, 0);
2164 if (load_segment(&e1, &e2, new_cs) != 0)
2165 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2166 cpl = env->hflags & HF_CPL_MASK;
2167 if (e2 & DESC_S_MASK) {
2168 if (!(e2 & DESC_CS_MASK))
2169 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2170 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2171 if (e2 & DESC_C_MASK) {
2172 /* conforming code segment */
2173 if (dpl > cpl)
2174 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2175 } else {
2176 /* non conforming code segment */
2177 rpl = new_cs & 3;
2178 if (rpl > cpl)
2179 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2180 if (dpl != cpl)
2181 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2183 if (!(e2 & DESC_P_MASK))
2184 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2185 limit = get_seg_limit(e1, e2);
2186 if (new_eip > limit &&
2187 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2188 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2189 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2190 get_seg_base(e1, e2), limit, e2);
2191 EIP = new_eip;
2192 } else {
2193 /* jump to call or task gate */
2194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2195 rpl = new_cs & 3;
2196 cpl = env->hflags & HF_CPL_MASK;
2197 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2198 switch(type) {
2199 case 1: /* 286 TSS */
2200 case 9: /* 386 TSS */
2201 case 5: /* task gate */
2202 if (dpl < cpl || dpl < rpl)
2203 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2204 next_eip = env->eip + next_eip_addend;
2205 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2206 CC_OP = CC_OP_EFLAGS;
2207 break;
2208 case 4: /* 286 call gate */
2209 case 12: /* 386 call gate */
2210 if ((dpl < cpl) || (dpl < rpl))
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 if (!(e2 & DESC_P_MASK))
2213 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2214 gate_cs = e1 >> 16;
2215 new_eip = (e1 & 0xffff);
2216 if (type == 12)
2217 new_eip |= (e2 & 0xffff0000);
2218 if (load_segment(&e1, &e2, gate_cs) != 0)
2219 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2220 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2221 /* must be code segment */
2222 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2223 (DESC_S_MASK | DESC_CS_MASK)))
2224 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2225 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2226 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2227 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2228 if (!(e2 & DESC_P_MASK))
2229 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2230 limit = get_seg_limit(e1, e2);
2231 if (new_eip > limit)
2232 raise_exception_err(EXCP0D_GPF, 0);
2233 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2234 get_seg_base(e1, e2), limit, e2);
2235 EIP = new_eip;
2236 break;
2237 default:
2238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2239 break;
2244 /* real mode call */
2245 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2246 int shift, int next_eip)
2248 int new_eip;
2249 uint32_t esp, esp_mask;
2250 target_ulong ssp;
2252 new_eip = new_eip1;
2253 esp = ESP;
2254 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2255 ssp = env->segs[R_SS].base;
2256 if (shift) {
2257 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2258 PUSHL(ssp, esp, esp_mask, next_eip);
2259 } else {
2260 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2261 PUSHW(ssp, esp, esp_mask, next_eip);
2264 SET_ESP(esp, esp_mask);
2265 env->eip = new_eip;
2266 env->segs[R_CS].selector = new_cs;
2267 env->segs[R_CS].base = (new_cs << 4);
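/* Illustrative sketch (not in the original source): in real mode the code
   segment base is simply selector << 4, as set just above.  The demo
   function name is hypothetical. */
#if 0
static uint32_t demo_real_mode_base(uint16_t selector)
{
    /* same computation as env->segs[R_CS].base = (new_cs << 4) above,
       e.g. selector 0xf000 gives base 0xf0000 */
    return (uint32_t)selector << 4;
}
#endif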
2270 /* protected mode call */
2271 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2272 int shift, int next_eip_addend)
2274 int new_stack, i;
2275 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2276 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2277 uint32_t val, limit, old_sp_mask;
2278 target_ulong ssp, old_ssp, next_eip;
2280 next_eip = env->eip + next_eip_addend;
2281 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2282 LOG_PCALL_STATE(env);
2283 if ((new_cs & 0xfffc) == 0)
2284 raise_exception_err(EXCP0D_GPF, 0);
2285 if (load_segment(&e1, &e2, new_cs) != 0)
2286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2287 cpl = env->hflags & HF_CPL_MASK;
2288 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2289 if (e2 & DESC_S_MASK) {
2290 if (!(e2 & DESC_CS_MASK))
2291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2293 if (e2 & DESC_C_MASK) {
2294 /* conforming code segment */
2295 if (dpl > cpl)
2296 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2297 } else {
2298 /* non conforming code segment */
2299 rpl = new_cs & 3;
2300 if (rpl > cpl)
2301 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2302 if (dpl != cpl)
2303 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2305 if (!(e2 & DESC_P_MASK))
2306 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2308 #ifdef TARGET_X86_64
2309 /* XXX: check 16/32 bit cases in long mode */
2310 if (shift == 2) {
2311 target_ulong rsp;
2312 /* 64 bit case */
2313 rsp = ESP;
2314 PUSHQ(rsp, env->segs[R_CS].selector);
2315 PUSHQ(rsp, next_eip);
2316 /* from this point, not restartable */
2317 ESP = rsp;
2318 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2319 get_seg_base(e1, e2),
2320 get_seg_limit(e1, e2), e2);
2321 EIP = new_eip;
2322 } else
2323 #endif
2325 sp = ESP;
2326 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2327 ssp = env->segs[R_SS].base;
2328 if (shift) {
2329 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2330 PUSHL(ssp, sp, sp_mask, next_eip);
2331 } else {
2332 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2333 PUSHW(ssp, sp, sp_mask, next_eip);
2336 limit = get_seg_limit(e1, e2);
2337 if (new_eip > limit)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 /* from this point, not restartable */
2340 SET_ESP(sp, sp_mask);
2341 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2342 get_seg_base(e1, e2), limit, e2);
2343 EIP = new_eip;
2345 } else {
2346 /* check gate type */
2347 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2348 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2349 rpl = new_cs & 3;
2350 switch(type) {
2351 case 1: /* available 286 TSS */
2352 case 9: /* available 386 TSS */
2353 case 5: /* task gate */
2354 if (dpl < cpl || dpl < rpl)
2355 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2356 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2357 CC_OP = CC_OP_EFLAGS;
2358 return;
2359 case 4: /* 286 call gate */
2360 case 12: /* 386 call gate */
2361 break;
2362 default:
2363 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2364 break;
2366 shift = type >> 3;
2368 if (dpl < cpl || dpl < rpl)
2369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2370 /* check valid bit */
2371 if (!(e2 & DESC_P_MASK))
2372 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2373 selector = e1 >> 16;
2374 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2375 param_count = e2 & 0x1f;
2376 if ((selector & 0xfffc) == 0)
2377 raise_exception_err(EXCP0D_GPF, 0);
2379 if (load_segment(&e1, &e2, selector) != 0)
2380 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2381 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2382 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2383 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2384 if (dpl > cpl)
2385 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2386 if (!(e2 & DESC_P_MASK))
2387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2389 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2390 /* to inner privilege */
2391 get_ss_esp_from_tss(&ss, &sp, dpl);
2392 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2393 ss, sp, param_count, ESP);
2394 if ((ss & 0xfffc) == 0)
2395 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2396 if ((ss & 3) != dpl)
2397 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2398 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2399 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2400 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2401 if (ss_dpl != dpl)
2402 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2403 if (!(ss_e2 & DESC_S_MASK) ||
2404 (ss_e2 & DESC_CS_MASK) ||
2405 !(ss_e2 & DESC_W_MASK))
2406 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2407 if (!(ss_e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2410 // push_size = ((param_count * 2) + 8) << shift;
2412 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2413 old_ssp = env->segs[R_SS].base;
2415 sp_mask = get_sp_mask(ss_e2);
2416 ssp = get_seg_base(ss_e1, ss_e2);
2417 if (shift) {
2418 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2419 PUSHL(ssp, sp, sp_mask, ESP);
2420 for(i = param_count - 1; i >= 0; i--) {
2421 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2422 PUSHL(ssp, sp, sp_mask, val);
2424 } else {
2425 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2426 PUSHW(ssp, sp, sp_mask, ESP);
2427 for(i = param_count - 1; i >= 0; i--) {
2428 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2429 PUSHW(ssp, sp, sp_mask, val);
2432 new_stack = 1;
2433 } else {
2434 /* to same privilege */
2435 sp = ESP;
2436 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2437 ssp = env->segs[R_SS].base;
2438 // push_size = (4 << shift);
2439 new_stack = 0;
2442 if (shift) {
2443 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2444 PUSHL(ssp, sp, sp_mask, next_eip);
2445 } else {
2446 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2447 PUSHW(ssp, sp, sp_mask, next_eip);
2450 /* from this point, not restartable */
2452 if (new_stack) {
2453 ss = (ss & ~3) | dpl;
2454 cpu_x86_load_seg_cache(env, R_SS, ss,
2455 ssp,
2456 get_seg_limit(ss_e1, ss_e2),
2457 ss_e2);
2460 selector = (selector & ~3) | dpl;
2461 cpu_x86_load_seg_cache(env, R_CS, selector,
2462 get_seg_base(e1, e2),
2463 get_seg_limit(e1, e2),
2464 e2);
2465 cpu_x86_set_cpl(env, dpl);
2466 SET_ESP(sp, sp_mask);
2467 EIP = offset;
2469 #ifdef USE_KQEMU
2470 if (kqemu_is_ok(env)) {
2471 env->exception_index = -1;
2472 cpu_loop_exit();
2474 #endif
2477 /* real and vm86 mode iret */
2478 void helper_iret_real(int shift)
2480 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2481 target_ulong ssp;
2482 int eflags_mask;
2484 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2485 sp = ESP;
2486 ssp = env->segs[R_SS].base;
2487 if (shift == 1) {
2488 /* 32 bits */
2489 POPL(ssp, sp, sp_mask, new_eip);
2490 POPL(ssp, sp, sp_mask, new_cs);
2491 new_cs &= 0xffff;
2492 POPL(ssp, sp, sp_mask, new_eflags);
2493 } else {
2494 /* 16 bits */
2495 POPW(ssp, sp, sp_mask, new_eip);
2496 POPW(ssp, sp, sp_mask, new_cs);
2497 POPW(ssp, sp, sp_mask, new_eflags);
2499 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2500 env->segs[R_CS].selector = new_cs;
2501 env->segs[R_CS].base = (new_cs << 4);
2502 env->eip = new_eip;
2503 if (env->eflags & VM_MASK)
2504 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2505 else
2506 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2507 if (shift == 0)
2508 eflags_mask &= 0xffff;
2509 load_eflags(new_eflags, eflags_mask);
2510 env->hflags2 &= ~HF2_NMI_MASK;
2513 static inline void validate_seg(int seg_reg, int cpl)
2515 int dpl;
2516 uint32_t e2;
2518 /* XXX: on x86_64, we do not want to nullify FS and GS because
2519 they may still contain a valid base. I would be interested to
2520 know how a real x86_64 CPU behaves */
2521 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2522 (env->segs[seg_reg].selector & 0xfffc) == 0)
2523 return;
2525 e2 = env->segs[seg_reg].flags;
2526 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2527 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2528 /* data or non conforming code segment */
2529 if (dpl < cpl) {
2530 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2535 /* protected mode iret */
2536 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2538 uint32_t new_cs, new_eflags, new_ss;
2539 uint32_t new_es, new_ds, new_fs, new_gs;
2540 uint32_t e1, e2, ss_e1, ss_e2;
2541 int cpl, dpl, rpl, eflags_mask, iopl;
2542 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2544 #ifdef TARGET_X86_64
2545 if (shift == 2)
2546 sp_mask = -1;
2547 else
2548 #endif
2549 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2550 sp = ESP;
2551 ssp = env->segs[R_SS].base;
2552 new_eflags = 0; /* avoid warning */
2553 #ifdef TARGET_X86_64
2554 if (shift == 2) {
2555 POPQ(sp, new_eip);
2556 POPQ(sp, new_cs);
2557 new_cs &= 0xffff;
2558 if (is_iret) {
2559 POPQ(sp, new_eflags);
2561 } else
2562 #endif
2563 if (shift == 1) {
2564 /* 32 bits */
2565 POPL(ssp, sp, sp_mask, new_eip);
2566 POPL(ssp, sp, sp_mask, new_cs);
2567 new_cs &= 0xffff;
2568 if (is_iret) {
2569 POPL(ssp, sp, sp_mask, new_eflags);
2570 if (new_eflags & VM_MASK)
2571 goto return_to_vm86;
2573 } else {
2574 /* 16 bits */
2575 POPW(ssp, sp, sp_mask, new_eip);
2576 POPW(ssp, sp, sp_mask, new_cs);
2577 if (is_iret)
2578 POPW(ssp, sp, sp_mask, new_eflags);
2580 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2581 new_cs, new_eip, shift, addend);
2582 LOG_PCALL_STATE(env);
2583 if ((new_cs & 0xfffc) == 0)
2584 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2585 if (load_segment(&e1, &e2, new_cs) != 0)
2586 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2587 if (!(e2 & DESC_S_MASK) ||
2588 !(e2 & DESC_CS_MASK))
2589 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2590 cpl = env->hflags & HF_CPL_MASK;
2591 rpl = new_cs & 3;
2592 if (rpl < cpl)
2593 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2594 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2595 if (e2 & DESC_C_MASK) {
2596 if (dpl > rpl)
2597 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2598 } else {
2599 if (dpl != rpl)
2600 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2602 if (!(e2 & DESC_P_MASK))
2603 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2605 sp += addend;
2606 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2607 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2608 /* return to same privilege level */
2609 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2610 get_seg_base(e1, e2),
2611 get_seg_limit(e1, e2),
2612 e2);
2613 } else {
2614 /* return to different privilege level */
2615 #ifdef TARGET_X86_64
2616 if (shift == 2) {
2617 POPQ(sp, new_esp);
2618 POPQ(sp, new_ss);
2619 new_ss &= 0xffff;
2620 } else
2621 #endif
2622 if (shift == 1) {
2623 /* 32 bits */
2624 POPL(ssp, sp, sp_mask, new_esp);
2625 POPL(ssp, sp, sp_mask, new_ss);
2626 new_ss &= 0xffff;
2627 } else {
2628 /* 16 bits */
2629 POPW(ssp, sp, sp_mask, new_esp);
2630 POPW(ssp, sp, sp_mask, new_ss);
2632 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2633 new_ss, new_esp);
2634 if ((new_ss & 0xfffc) == 0) {
2635 #ifdef TARGET_X86_64
2636 /* NULL ss is allowed in long mode if the new CPL (rpl) != 3 */
2637 /* XXX: test CS64 ? */
2638 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2639 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2640 0, 0xffffffff,
2641 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2642 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2643 DESC_W_MASK | DESC_A_MASK);
2644 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2645 } else
2646 #endif
2648 raise_exception_err(EXCP0D_GPF, 0);
2650 } else {
2651 if ((new_ss & 3) != rpl)
2652 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2653 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2654 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2655 if (!(ss_e2 & DESC_S_MASK) ||
2656 (ss_e2 & DESC_CS_MASK) ||
2657 !(ss_e2 & DESC_W_MASK))
2658 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2659 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2660 if (dpl != rpl)
2661 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2662 if (!(ss_e2 & DESC_P_MASK))
2663 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2664 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2665 get_seg_base(ss_e1, ss_e2),
2666 get_seg_limit(ss_e1, ss_e2),
2667 ss_e2);
2670 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2671 get_seg_base(e1, e2),
2672 get_seg_limit(e1, e2),
2673 e2);
2674 cpu_x86_set_cpl(env, rpl);
2675 sp = new_esp;
2676 #ifdef TARGET_X86_64
2677 if (env->hflags & HF_CS64_MASK)
2678 sp_mask = -1;
2679 else
2680 #endif
2681 sp_mask = get_sp_mask(ss_e2);
2683 /* validate data segments */
2684 validate_seg(R_ES, rpl);
2685 validate_seg(R_DS, rpl);
2686 validate_seg(R_FS, rpl);
2687 validate_seg(R_GS, rpl);
2689 sp += addend;
2691 SET_ESP(sp, sp_mask);
2692 env->eip = new_eip;
2693 if (is_iret) {
2694 /* NOTE: 'cpl' is the _old_ CPL */
2695 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2696 if (cpl == 0)
2697 eflags_mask |= IOPL_MASK;
2698 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2699 if (cpl <= iopl)
2700 eflags_mask |= IF_MASK;
2701 if (shift == 0)
2702 eflags_mask &= 0xffff;
2703 load_eflags(new_eflags, eflags_mask);
2705 return;
2707 return_to_vm86:
2708 POPL(ssp, sp, sp_mask, new_esp);
2709 POPL(ssp, sp, sp_mask, new_ss);
2710 POPL(ssp, sp, sp_mask, new_es);
2711 POPL(ssp, sp, sp_mask, new_ds);
2712 POPL(ssp, sp, sp_mask, new_fs);
2713 POPL(ssp, sp, sp_mask, new_gs);
2715 /* modify processor state */
2716 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2717 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2718 load_seg_vm(R_CS, new_cs & 0xffff);
2719 cpu_x86_set_cpl(env, 3);
2720 load_seg_vm(R_SS, new_ss & 0xffff);
2721 load_seg_vm(R_ES, new_es & 0xffff);
2722 load_seg_vm(R_DS, new_ds & 0xffff);
2723 load_seg_vm(R_FS, new_fs & 0xffff);
2724 load_seg_vm(R_GS, new_gs & 0xffff);
2726 env->eip = new_eip & 0xffff;
2727 ESP = new_esp;
2730 void helper_iret_protected(int shift, int next_eip)
2732 int tss_selector, type;
2733 uint32_t e1, e2;
2735 /* specific case for TSS */
2736 if (env->eflags & NT_MASK) {
2737 #ifdef TARGET_X86_64
2738 if (env->hflags & HF_LMA_MASK)
2739 raise_exception_err(EXCP0D_GPF, 0);
2740 #endif
2741 tss_selector = lduw_kernel(env->tr.base + 0);
2742 if (tss_selector & 4)
2743 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2744 if (load_segment(&e1, &e2, tss_selector) != 0)
2745 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2746 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2747 /* NOTE: we check both segment and busy TSS */
2748 if (type != 3)
2749 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2750 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2751 } else {
2752 helper_ret_protected(shift, 1, 0);
2754 env->hflags2 &= ~HF2_NMI_MASK;
2755 #ifdef USE_KQEMU
2756 if (kqemu_is_ok(env)) {
2757 CC_OP = CC_OP_EFLAGS;
2758 env->exception_index = -1;
2759 cpu_loop_exit();
2761 #endif
2764 void helper_lret_protected(int shift, int addend)
2766 helper_ret_protected(shift, 0, addend);
2767 #ifdef USE_KQEMU
2768 if (kqemu_is_ok(env)) {
2769 env->exception_index = -1;
2770 cpu_loop_exit();
2772 #endif
2775 void helper_sysenter(void)
2777 if (env->sysenter_cs == 0) {
2778 raise_exception_err(EXCP0D_GPF, 0);
2780 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2781 cpu_x86_set_cpl(env, 0);
2783 #ifdef TARGET_X86_64
2784 if (env->hflags & HF_LMA_MASK) {
2785 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2786 0, 0xffffffff,
2787 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2788 DESC_S_MASK |
2789 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2790 } else
2791 #endif
2793 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2794 0, 0xffffffff,
2795 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2796 DESC_S_MASK |
2797 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2799 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2800 0, 0xffffffff,
2801 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2802 DESC_S_MASK |
2803 DESC_W_MASK | DESC_A_MASK);
2804 ESP = env->sysenter_esp;
2805 EIP = env->sysenter_eip;
2808 void helper_sysexit(int dflag)
2810 int cpl;
2812 cpl = env->hflags & HF_CPL_MASK;
2813 if (env->sysenter_cs == 0 || cpl != 0) {
2814 raise_exception_err(EXCP0D_GPF, 0);
2816 cpu_x86_set_cpl(env, 3);
2817 #ifdef TARGET_X86_64
2818 if (dflag == 2) {
2819 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2820 0, 0xffffffff,
2821 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2822 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2823 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2824 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2825 0, 0xffffffff,
2826 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2827 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2828 DESC_W_MASK | DESC_A_MASK);
2829 } else
2830 #endif
2832 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2833 0, 0xffffffff,
2834 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2835 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2836 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2837 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2838 0, 0xffffffff,
2839 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2840 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2841 DESC_W_MASK | DESC_A_MASK);
2843 ESP = ECX;
2844 EIP = EDX;
2845 #ifdef USE_KQEMU
2846 if (kqemu_is_ok(env)) {
2847 env->exception_index = -1;
2848 cpu_loop_exit();
2850 #endif
2853 #if defined(CONFIG_USER_ONLY)
2854 target_ulong helper_read_crN(int reg)
2856 return 0;
2859 void helper_write_crN(int reg, target_ulong t0)
2863 void helper_movl_drN_T0(int reg, target_ulong t0)
2866 #else
2867 target_ulong helper_read_crN(int reg)
2869 target_ulong val;
2871 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2872 switch(reg) {
2873 default:
2874 val = env->cr[reg];
2875 break;
2876 case 8:
2877 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2878 val = cpu_get_apic_tpr(env);
2879 } else {
2880 val = env->v_tpr;
2882 break;
2884 return val;
2887 void helper_write_crN(int reg, target_ulong t0)
2889 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2890 switch(reg) {
2891 case 0:
2892 cpu_x86_update_cr0(env, t0);
2893 break;
2894 case 3:
2895 cpu_x86_update_cr3(env, t0);
2896 break;
2897 case 4:
2898 cpu_x86_update_cr4(env, t0);
2899 break;
2900 case 8:
2901 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2902 cpu_set_apic_tpr(env, t0);
2904 env->v_tpr = t0 & 0x0f;
2905 break;
2906 default:
2907 env->cr[reg] = t0;
2908 break;
2912 void helper_movl_drN_T0(int reg, target_ulong t0)
2914 int i;
2916 if (reg < 4) {
2917 hw_breakpoint_remove(env, reg);
2918 env->dr[reg] = t0;
2919 hw_breakpoint_insert(env, reg);
2920 } else if (reg == 7) {
2921 for (i = 0; i < 4; i++)
2922 hw_breakpoint_remove(env, i);
2923 env->dr[7] = t0;
2924 for (i = 0; i < 4; i++)
2925 hw_breakpoint_insert(env, i);
2926 } else
2927 env->dr[reg] = t0;
2929 #endif
2931 void helper_lmsw(target_ulong t0)
2933 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2934 if already set to one. */
2935 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2936 helper_write_crN(0, t0);
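/* Illustrative sketch (not in the original source): helper_lmsw only lets
   MSW writes touch CR0 bits 3..0 (TS, EM, MP, PE) and, because bit 0 of the
   old CR0 is kept by the ~0xe mask, PE can be set but never cleared.  The
   demo function name is hypothetical. */
#if 0
static target_ulong demo_lmsw_merge(target_ulong cr0, target_ulong msw)
{
    /* same masking as helper_lmsw above:
       cr0 = 0x11, msw = 0x0 -> 0x11 (PE stays set)
       cr0 = 0x01, msw = 0xe -> 0x0f (TS/EM/MP set)   */
    return (cr0 & ~0xe) | (msw & 0xf);
}
#endif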
2939 void helper_clts(void)
2941 env->cr[0] &= ~CR0_TS_MASK;
2942 env->hflags &= ~HF_TS_MASK;
2945 void helper_invlpg(target_ulong addr)
2947 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2948 tlb_flush_page(env, addr);
2951 void helper_rdtsc(void)
2953 uint64_t val;
2955 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2956 raise_exception(EXCP0D_GPF);
2958 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2960 val = cpu_get_tsc(env) + env->tsc_offset;
2961 EAX = (uint32_t)(val);
2962 EDX = (uint32_t)(val >> 32);
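/* Illustrative sketch (not in the original source): rdtsc (and rdmsr below)
   return a 64-bit value split across EDX:EAX; recombining it is just the
   inverse of the split above.  The demo function name is hypothetical. */
#if 0
static uint64_t demo_edx_eax_to_u64(uint32_t edx, uint32_t eax)
{
    /* inverse of EAX = (uint32_t)val; EDX = (uint32_t)(val >> 32); */
    return ((uint64_t)edx << 32) | eax;
}
#endif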
2965 void helper_rdpmc(void)
2967 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2968 raise_exception(EXCP0D_GPF);
2970 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2972 /* currently unimplemented */
2973 raise_exception_err(EXCP06_ILLOP, 0);
2976 #if defined(CONFIG_USER_ONLY)
2977 void helper_wrmsr(void)
2981 void helper_rdmsr(void)
2984 #else
2985 void helper_wrmsr(void)
2987 uint64_t val;
2989 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
2991 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2993 switch((uint32_t)ECX) {
2994 case MSR_IA32_SYSENTER_CS:
2995 env->sysenter_cs = val & 0xffff;
2996 break;
2997 case MSR_IA32_SYSENTER_ESP:
2998 env->sysenter_esp = val;
2999 break;
3000 case MSR_IA32_SYSENTER_EIP:
3001 env->sysenter_eip = val;
3002 break;
3003 case MSR_IA32_APICBASE:
3004 cpu_set_apic_base(env, val);
3005 break;
3006 case MSR_EFER:
3008 uint64_t update_mask;
3009 update_mask = 0;
3010 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3011 update_mask |= MSR_EFER_SCE;
3012 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3013 update_mask |= MSR_EFER_LME;
3014 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3015 update_mask |= MSR_EFER_FFXSR;
3016 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3017 update_mask |= MSR_EFER_NXE;
3018 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3019 update_mask |= MSR_EFER_SVME;
3020 cpu_load_efer(env, (env->efer & ~update_mask) |
3021 (val & update_mask));
3023 break;
3024 case MSR_STAR:
3025 env->star = val;
3026 break;
3027 case MSR_PAT:
3028 env->pat = val;
3029 break;
3030 case MSR_VM_HSAVE_PA:
3031 env->vm_hsave = val;
3032 break;
3033 #ifdef TARGET_X86_64
3034 case MSR_LSTAR:
3035 env->lstar = val;
3036 break;
3037 case MSR_CSTAR:
3038 env->cstar = val;
3039 break;
3040 case MSR_FMASK:
3041 env->fmask = val;
3042 break;
3043 case MSR_FSBASE:
3044 env->segs[R_FS].base = val;
3045 break;
3046 case MSR_GSBASE:
3047 env->segs[R_GS].base = val;
3048 break;
3049 case MSR_KERNELGSBASE:
3050 env->kernelgsbase = val;
3051 break;
3052 #endif
3053 default:
3054 /* XXX: exception ? */
3055 break;
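/* Illustrative sketch (not in the original source): the MSR_EFER case above
   builds update_mask from the CPUID feature bits and applies the guest value
   with a read-modify-write, so bits the CPU model does not advertise keep
   their old value instead of being written.  The demo function name is
   hypothetical. */
#if 0
static uint64_t demo_masked_msr_write(uint64_t old_val, uint64_t new_val,
                                      uint64_t writable_mask)
{
    /* same merge as cpu_load_efer(env, (env->efer & ~update_mask) |
       (val & update_mask)) above */
    return (old_val & ~writable_mask) | (new_val & writable_mask);
}
#endif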
3059 void helper_rdmsr(void)
3061 uint64_t val;
3063 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3065 switch((uint32_t)ECX) {
3066 case MSR_IA32_SYSENTER_CS:
3067 val = env->sysenter_cs;
3068 break;
3069 case MSR_IA32_SYSENTER_ESP:
3070 val = env->sysenter_esp;
3071 break;
3072 case MSR_IA32_SYSENTER_EIP:
3073 val = env->sysenter_eip;
3074 break;
3075 case MSR_IA32_APICBASE:
3076 val = cpu_get_apic_base(env);
3077 break;
3078 case MSR_EFER:
3079 val = env->efer;
3080 break;
3081 case MSR_STAR:
3082 val = env->star;
3083 break;
3084 case MSR_PAT:
3085 val = env->pat;
3086 break;
3087 case MSR_VM_HSAVE_PA:
3088 val = env->vm_hsave;
3089 break;
3090 case MSR_IA32_PERF_STATUS:
3091 /* tsc_increment_by_tick */
3092 val = 1000ULL;
3093 /* CPU multiplier */
3094 val |= (((uint64_t)4ULL) << 40);
3095 break;
3096 #ifdef TARGET_X86_64
3097 case MSR_LSTAR:
3098 val = env->lstar;
3099 break;
3100 case MSR_CSTAR:
3101 val = env->cstar;
3102 break;
3103 case MSR_FMASK:
3104 val = env->fmask;
3105 break;
3106 case MSR_FSBASE:
3107 val = env->segs[R_FS].base;
3108 break;
3109 case MSR_GSBASE:
3110 val = env->segs[R_GS].base;
3111 break;
3112 case MSR_KERNELGSBASE:
3113 val = env->kernelgsbase;
3114 break;
3115 #endif
3116 #ifdef USE_KQEMU
3117 case MSR_QPI_COMMBASE:
3118 if (env->kqemu_enabled) {
3119 val = kqemu_comm_base;
3120 } else {
3121 val = 0;
3123 break;
3124 #endif
3125 default:
3126 /* XXX: exception ? */
3127 val = 0;
3128 break;
3130 EAX = (uint32_t)(val);
3131 EDX = (uint32_t)(val >> 32);
3133 #endif
3135 target_ulong helper_lsl(target_ulong selector1)
3137 unsigned int limit;
3138 uint32_t e1, e2, eflags, selector;
3139 int rpl, dpl, cpl, type;
3141 selector = selector1 & 0xffff;
3142 eflags = helper_cc_compute_all(CC_OP);
3143 if (load_segment(&e1, &e2, selector) != 0)
3144 goto fail;
3145 rpl = selector & 3;
3146 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3147 cpl = env->hflags & HF_CPL_MASK;
3148 if (e2 & DESC_S_MASK) {
3149 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3150 /* conforming */
3151 } else {
3152 if (dpl < cpl || dpl < rpl)
3153 goto fail;
3155 } else {
3156 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3157 switch(type) {
3158 case 1:
3159 case 2:
3160 case 3:
3161 case 9:
3162 case 11:
3163 break;
3164 default:
3165 goto fail;
3167 if (dpl < cpl || dpl < rpl) {
3168 fail:
3169 CC_SRC = eflags & ~CC_Z;
3170 return 0;
3173 limit = get_seg_limit(e1, e2);
3174 CC_SRC = eflags | CC_Z;
3175 return limit;
3178 target_ulong helper_lar(target_ulong selector1)
3180 uint32_t e1, e2, eflags, selector;
3181 int rpl, dpl, cpl, type;
3183 selector = selector1 & 0xffff;
3184 eflags = helper_cc_compute_all(CC_OP);
3185 if ((selector & 0xfffc) == 0)
3186 goto fail;
3187 if (load_segment(&e1, &e2, selector) != 0)
3188 goto fail;
3189 rpl = selector & 3;
3190 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3191 cpl = env->hflags & HF_CPL_MASK;
3192 if (e2 & DESC_S_MASK) {
3193 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3194 /* conforming */
3195 } else {
3196 if (dpl < cpl || dpl < rpl)
3197 goto fail;
3199 } else {
3200 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3201 switch(type) {
3202 case 1:
3203 case 2:
3204 case 3:
3205 case 4:
3206 case 5:
3207 case 9:
3208 case 11:
3209 case 12:
3210 break;
3211 default:
3212 goto fail;
3214 if (dpl < cpl || dpl < rpl) {
3215 fail:
3216 CC_SRC = eflags & ~CC_Z;
3217 return 0;
3220 CC_SRC = eflags | CC_Z;
3221 return e2 & 0x00f0ff00;
3224 void helper_verr(target_ulong selector1)
3226 uint32_t e1, e2, eflags, selector;
3227 int rpl, dpl, cpl;
3229 selector = selector1 & 0xffff;
3230 eflags = helper_cc_compute_all(CC_OP);
3231 if ((selector & 0xfffc) == 0)
3232 goto fail;
3233 if (load_segment(&e1, &e2, selector) != 0)
3234 goto fail;
3235 if (!(e2 & DESC_S_MASK))
3236 goto fail;
3237 rpl = selector & 3;
3238 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3239 cpl = env->hflags & HF_CPL_MASK;
3240 if (e2 & DESC_CS_MASK) {
3241 if (!(e2 & DESC_R_MASK))
3242 goto fail;
3243 if (!(e2 & DESC_C_MASK)) {
3244 if (dpl < cpl || dpl < rpl)
3245 goto fail;
3247 } else {
3248 if (dpl < cpl || dpl < rpl) {
3249 fail:
3250 CC_SRC = eflags & ~CC_Z;
3251 return;
3254 CC_SRC = eflags | CC_Z;
3257 void helper_verw(target_ulong selector1)
3259 uint32_t e1, e2, eflags, selector;
3260 int rpl, dpl, cpl;
3262 selector = selector1 & 0xffff;
3263 eflags = helper_cc_compute_all(CC_OP);
3264 if ((selector & 0xfffc) == 0)
3265 goto fail;
3266 if (load_segment(&e1, &e2, selector) != 0)
3267 goto fail;
3268 if (!(e2 & DESC_S_MASK))
3269 goto fail;
3270 rpl = selector & 3;
3271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3272 cpl = env->hflags & HF_CPL_MASK;
3273 if (e2 & DESC_CS_MASK) {
3274 goto fail;
3275 } else {
3276 if (dpl < cpl || dpl < rpl)
3277 goto fail;
3278 if (!(e2 & DESC_W_MASK)) {
3279 fail:
3280 CC_SRC = eflags & ~CC_Z;
3281 return;
3284 CC_SRC = eflags | CC_Z;
3287 /* x87 FPU helpers */
3289 static void fpu_set_exception(int mask)
3291 env->fpus |= mask;
3292 if (env->fpus & (~env->fpuc & FPUC_EM))
3293 env->fpus |= FPUS_SE | FPUS_B;
3296 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3298 if (b == 0.0)
3299 fpu_set_exception(FPUS_ZE);
3300 return a / b;
3303 static void fpu_raise_exception(void)
3305 if (env->cr[0] & CR0_NE_MASK) {
3306 raise_exception(EXCP10_COPR);
3308 #if !defined(CONFIG_USER_ONLY)
3309 else {
3310 cpu_set_ferr(env);
3312 #endif
3315 void helper_flds_FT0(uint32_t val)
3317 union {
3318 float32 f;
3319 uint32_t i;
3320 } u;
3321 u.i = val;
3322 FT0 = float32_to_floatx(u.f, &env->fp_status);
3325 void helper_fldl_FT0(uint64_t val)
3327 union {
3328 float64 f;
3329 uint64_t i;
3330 } u;
3331 u.i = val;
3332 FT0 = float64_to_floatx(u.f, &env->fp_status);
3335 void helper_fildl_FT0(int32_t val)
3337 FT0 = int32_to_floatx(val, &env->fp_status);
3340 void helper_flds_ST0(uint32_t val)
3342 int new_fpstt;
3343 union {
3344 float32 f;
3345 uint32_t i;
3346 } u;
3347 new_fpstt = (env->fpstt - 1) & 7;
3348 u.i = val;
3349 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3350 env->fpstt = new_fpstt;
3351 env->fptags[new_fpstt] = 0; /* validate stack entry */
3354 void helper_fldl_ST0(uint64_t val)
3356 int new_fpstt;
3357 union {
3358 float64 f;
3359 uint64_t i;
3360 } u;
3361 new_fpstt = (env->fpstt - 1) & 7;
3362 u.i = val;
3363 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3364 env->fpstt = new_fpstt;
3365 env->fptags[new_fpstt] = 0; /* validate stack entry */
3368 void helper_fildl_ST0(int32_t val)
3370 int new_fpstt;
3371 new_fpstt = (env->fpstt - 1) & 7;
3372 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3373 env->fpstt = new_fpstt;
3374 env->fptags[new_fpstt] = 0; /* validate stack entry */
3377 void helper_fildll_ST0(int64_t val)
3379 int new_fpstt;
3380 new_fpstt = (env->fpstt - 1) & 7;
3381 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3382 env->fpstt = new_fpstt;
3383 env->fptags[new_fpstt] = 0; /* validate stack entry */
3386 uint32_t helper_fsts_ST0(void)
3388 union {
3389 float32 f;
3390 uint32_t i;
3391 } u;
3392 u.f = floatx_to_float32(ST0, &env->fp_status);
3393 return u.i;
3396 uint64_t helper_fstl_ST0(void)
3398 union {
3399 float64 f;
3400 uint64_t i;
3401 } u;
3402 u.f = floatx_to_float64(ST0, &env->fp_status);
3403 return u.i;
3406 int32_t helper_fist_ST0(void)
3408 int32_t val;
3409 val = floatx_to_int32(ST0, &env->fp_status);
3410 if (val != (int16_t)val)
3411 val = -32768;
3412 return val;
3415 int32_t helper_fistl_ST0(void)
3417 int32_t val;
3418 val = floatx_to_int32(ST0, &env->fp_status);
3419 return val;
3422 int64_t helper_fistll_ST0(void)
3424 int64_t val;
3425 val = floatx_to_int64(ST0, &env->fp_status);
3426 return val;
3429 int32_t helper_fistt_ST0(void)
3431 int32_t val;
3432 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3433 if (val != (int16_t)val)
3434 val = -32768;
3435 return val;
3438 int32_t helper_fisttl_ST0(void)
3440 int32_t val;
3441 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3442 return val;
3445 int64_t helper_fisttll_ST0(void)
3447 int64_t val;
3448 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3449 return val;
3452 void helper_fldt_ST0(target_ulong ptr)
3454 int new_fpstt;
3455 new_fpstt = (env->fpstt - 1) & 7;
3456 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3457 env->fpstt = new_fpstt;
3458 env->fptags[new_fpstt] = 0; /* validate stack entry */
3461 void helper_fstt_ST0(target_ulong ptr)
3463 helper_fstt(ST0, ptr);
3466 void helper_fpush(void)
3468 fpush();
3471 void helper_fpop(void)
3473 fpop();
3476 void helper_fdecstp(void)
3478 env->fpstt = (env->fpstt - 1) & 7;
3479 env->fpus &= (~0x4700);
3482 void helper_fincstp(void)
3484 env->fpstt = (env->fpstt + 1) & 7;
3485 env->fpus &= (~0x4700);
3488 /* FPU move */
3490 void helper_ffree_STN(int st_index)
3492 env->fptags[(env->fpstt + st_index) & 7] = 1;
3495 void helper_fmov_ST0_FT0(void)
3497 ST0 = FT0;
3500 void helper_fmov_FT0_STN(int st_index)
3502 FT0 = ST(st_index);
3505 void helper_fmov_ST0_STN(int st_index)
3507 ST0 = ST(st_index);
3510 void helper_fmov_STN_ST0(int st_index)
3512 ST(st_index) = ST0;
3515 void helper_fxchg_ST0_STN(int st_index)
3517 CPU86_LDouble tmp;
3518 tmp = ST(st_index);
3519 ST(st_index) = ST0;
3520 ST0 = tmp;
3523 /* FPU operations */
3525 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3527 void helper_fcom_ST0_FT0(void)
3529 int ret;
3531 ret = floatx_compare(ST0, FT0, &env->fp_status);
3532 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3535 void helper_fucom_ST0_FT0(void)
3537 int ret;
3539 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3540 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3543 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3545 void helper_fcomi_ST0_FT0(void)
3547 int eflags;
3548 int ret;
3550 ret = floatx_compare(ST0, FT0, &env->fp_status);
3551 eflags = helper_cc_compute_all(CC_OP);
3552 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3553 CC_SRC = eflags;
3556 void helper_fucomi_ST0_FT0(void)
3558 int eflags;
3559 int ret;
3561 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3562 eflags = helper_cc_compute_all(CC_OP);
3563 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3564 CC_SRC = eflags;
3567 void helper_fadd_ST0_FT0(void)
3569 ST0 += FT0;
3572 void helper_fmul_ST0_FT0(void)
3574 ST0 *= FT0;
3577 void helper_fsub_ST0_FT0(void)
3579 ST0 -= FT0;
3582 void helper_fsubr_ST0_FT0(void)
3584 ST0 = FT0 - ST0;
3587 void helper_fdiv_ST0_FT0(void)
3589 ST0 = helper_fdiv(ST0, FT0);
3592 void helper_fdivr_ST0_FT0(void)
3594 ST0 = helper_fdiv(FT0, ST0);
3597 /* fp operations between STN and ST0 */
3599 void helper_fadd_STN_ST0(int st_index)
3601 ST(st_index) += ST0;
3604 void helper_fmul_STN_ST0(int st_index)
3606 ST(st_index) *= ST0;
3609 void helper_fsub_STN_ST0(int st_index)
3611 ST(st_index) -= ST0;
3614 void helper_fsubr_STN_ST0(int st_index)
3616 CPU86_LDouble *p;
3617 p = &ST(st_index);
3618 *p = ST0 - *p;
3621 void helper_fdiv_STN_ST0(int st_index)
3623 CPU86_LDouble *p;
3624 p = &ST(st_index);
3625 *p = helper_fdiv(*p, ST0);
3628 void helper_fdivr_STN_ST0(int st_index)
3630 CPU86_LDouble *p;
3631 p = &ST(st_index);
3632 *p = helper_fdiv(ST0, *p);
3635 /* misc FPU operations */
3636 void helper_fchs_ST0(void)
3638 ST0 = floatx_chs(ST0);
3641 void helper_fabs_ST0(void)
3643 ST0 = floatx_abs(ST0);
3646 void helper_fld1_ST0(void)
3648 ST0 = f15rk[1];
3651 void helper_fldl2t_ST0(void)
3653 ST0 = f15rk[6];
3656 void helper_fldl2e_ST0(void)
3658 ST0 = f15rk[5];
3661 void helper_fldpi_ST0(void)
3663 ST0 = f15rk[2];
3666 void helper_fldlg2_ST0(void)
3668 ST0 = f15rk[3];
3671 void helper_fldln2_ST0(void)
3673 ST0 = f15rk[4];
3676 void helper_fldz_ST0(void)
3678 ST0 = f15rk[0];
3681 void helper_fldz_FT0(void)
3683 FT0 = f15rk[0];
3686 uint32_t helper_fnstsw(void)
3688 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3691 uint32_t helper_fnstcw(void)
3693 return env->fpuc;
3696 static void update_fp_status(void)
3698 int rnd_type;
3700 /* set rounding mode */
3701 switch(env->fpuc & RC_MASK) {
3702 default:
3703 case RC_NEAR:
3704 rnd_type = float_round_nearest_even;
3705 break;
3706 case RC_DOWN:
3707 rnd_type = float_round_down;
3708 break;
3709 case RC_UP:
3710 rnd_type = float_round_up;
3711 break;
3712 case RC_CHOP:
3713 rnd_type = float_round_to_zero;
3714 break;
3716 set_float_rounding_mode(rnd_type, &env->fp_status);
3717 #ifdef FLOATX80
3718 switch((env->fpuc >> 8) & 3) {
3719 case 0:
3720 rnd_type = 32;
3721 break;
3722 case 2:
3723 rnd_type = 64;
3724 break;
3725 case 3:
3726 default:
3727 rnd_type = 80;
3728 break;
3730 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3731 #endif
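/* Illustrative note (not in the original source): update_fp_status() decodes
   two fields of the x87 control word -- RC in bits 11..10 selects the
   rounding mode (00 nearest, 01 down, 10 up, 11 toward zero) and the
   precision field in bits 9..8 selects 32/64/80-bit results.  A small worked
   example with a hypothetical helper name: */
#if 0
static int demo_fpuc_round_to_zero(uint32_t fpuc)
{
    /* same field test as the RC_CHOP case above; the power-up control word
       0x037f has RC == 00 (nearest) and precision == 11 (extended) */
    return (fpuc & RC_MASK) == RC_CHOP;
}
#endif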
3734 void helper_fldcw(uint32_t val)
3736 env->fpuc = val;
3737 update_fp_status();
3740 void helper_fclex(void)
3742 env->fpus &= 0x7f00;
3745 void helper_fwait(void)
3747 if (env->fpus & FPUS_SE)
3748 fpu_raise_exception();
3751 void helper_fninit(void)
3753 env->fpus = 0;
3754 env->fpstt = 0;
3755 env->fpuc = 0x37f;
3756 env->fptags[0] = 1;
3757 env->fptags[1] = 1;
3758 env->fptags[2] = 1;
3759 env->fptags[3] = 1;
3760 env->fptags[4] = 1;
3761 env->fptags[5] = 1;
3762 env->fptags[6] = 1;
3763 env->fptags[7] = 1;
3766 /* BCD ops */
3768 void helper_fbld_ST0(target_ulong ptr)
3770 CPU86_LDouble tmp;
3771 uint64_t val;
3772 unsigned int v;
3773 int i;
3775 val = 0;
3776 for(i = 8; i >= 0; i--) {
3777 v = ldub(ptr + i);
3778 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3780 tmp = val;
3781 if (ldub(ptr + 9) & 0x80)
3782 tmp = -tmp;
3783 fpush();
3784 ST0 = tmp;
3787 void helper_fbst_ST0(target_ulong ptr)
3789 int v;
3790 target_ulong mem_ref, mem_end;
3791 int64_t val;
3793 val = floatx_to_int64(ST0, &env->fp_status);
3794 mem_ref = ptr;
3795 mem_end = mem_ref + 9;
3796 if (val < 0) {
3797 stb(mem_end, 0x80);
3798 val = -val;
3799 } else {
3800 stb(mem_end, 0x00);
3802 while (mem_ref < mem_end) {
3803 if (val == 0)
3804 break;
3805 v = val % 100;
3806 val = val / 100;
3807 v = ((v / 10) << 4) | (v % 10);
3808 stb(mem_ref++, v);
3810 while (mem_ref < mem_end) {
3811 stb(mem_ref++, 0);
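/* Illustrative sketch (not in the original source): fbst stores ST0 as a
   10-byte packed-BCD value -- 9 little-endian bytes of two decimal digits
   each (low nibble is the less significant digit) plus a sign byte at
   offset 9.  A standalone encoder mirroring the loop above; the function
   name is hypothetical. */
#if 0
static void demo_pack_bcd(int64_t val, uint8_t out[10])
{
    int i, v;

    out[9] = (val < 0) ? 0x80 : 0x00;   /* sign byte, as stb(mem_end, ...) above */
    if (val < 0)
        val = -val;
    for (i = 0; i < 9; i++) {
        v = val % 100;
        val /= 100;
        out[i] = ((v / 10) << 4) | (v % 10);
    }
    /* e.g. 1234 encodes as out[0] = 0x34, out[1] = 0x12, out[2..8] = 0 */
}
#endif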
3815 void helper_f2xm1(void)
3817 ST0 = pow(2.0,ST0) - 1.0;
3820 void helper_fyl2x(void)
3822 CPU86_LDouble fptemp;
3824 fptemp = ST0;
3825 if (fptemp>0.0){
3826 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3827 ST1 *= fptemp;
3828 fpop();
3829 } else {
3830 env->fpus &= (~0x4700);
3831 env->fpus |= 0x400;
3835 void helper_fptan(void)
3837 CPU86_LDouble fptemp;
3839 fptemp = ST0;
3840 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3841 env->fpus |= 0x400;
3842 } else {
3843 ST0 = tan(fptemp);
3844 fpush();
3845 ST0 = 1.0;
3846 env->fpus &= (~0x400); /* C2 <-- 0 */
3847 /* the above code is for |arg| < 2**52 only */
3851 void helper_fpatan(void)
3853 CPU86_LDouble fptemp, fpsrcop;
3855 fpsrcop = ST1;
3856 fptemp = ST0;
3857 ST1 = atan2(fpsrcop,fptemp);
3858 fpop();
3861 void helper_fxtract(void)
3863 CPU86_LDoubleU temp;
3864 unsigned int expdif;
3866 temp.d = ST0;
3867 expdif = EXPD(temp) - EXPBIAS;
3868 /* DP exponent bias */
3869 ST0 = expdif;
3870 fpush();
3871 BIASEXPONENT(temp);
3872 ST0 = temp.d;
3875 void helper_fprem1(void)
3877 CPU86_LDouble dblq, fpsrcop, fptemp;
3878 CPU86_LDoubleU fpsrcop1, fptemp1;
3879 int expdif;
3880 signed long long int q;
3882 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3883 ST0 = 0.0 / 0.0; /* NaN */
3884 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3885 return;
3888 fpsrcop = ST0;
3889 fptemp = ST1;
3890 fpsrcop1.d = fpsrcop;
3891 fptemp1.d = fptemp;
3892 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3894 if (expdif < 0) {
3895 /* optimisation? taken from the AMD docs */
3896 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3897 /* ST0 is unchanged */
3898 return;
3901 if (expdif < 53) {
3902 dblq = fpsrcop / fptemp;
3903 /* round dblq towards nearest integer */
3904 dblq = rint(dblq);
3905 ST0 = fpsrcop - fptemp * dblq;
3907 /* convert dblq to q by truncating towards zero */
3908 if (dblq < 0.0)
3909 q = (signed long long int)(-dblq);
3910 else
3911 q = (signed long long int)dblq;
3913 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3914 /* (C0,C3,C1) <-- (q2,q1,q0) */
3915 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3916 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3917 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3918 } else {
3919 env->fpus |= 0x400; /* C2 <-- 1 */
3920 fptemp = pow(2.0, expdif - 50);
3921 fpsrcop = (ST0 / ST1) / fptemp;
3922 /* fpsrcop = integer obtained by chopping */
3923 fpsrcop = (fpsrcop < 0.0) ?
3924 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3925 ST0 -= (ST1 * fpsrcop * fptemp);
3929 void helper_fprem(void)
3931 CPU86_LDouble dblq, fpsrcop, fptemp;
3932 CPU86_LDoubleU fpsrcop1, fptemp1;
3933 int expdif;
3934 signed long long int q;
3936 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3937 ST0 = 0.0 / 0.0; /* NaN */
3938 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3939 return;
3942 fpsrcop = (CPU86_LDouble)ST0;
3943 fptemp = (CPU86_LDouble)ST1;
3944 fpsrcop1.d = fpsrcop;
3945 fptemp1.d = fptemp;
3946 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3948 if (expdif < 0) {
3949 /* optimisation? taken from the AMD docs */
3950 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3951 /* ST0 is unchanged */
3952 return;
3955 if ( expdif < 53 ) {
3956 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3957 /* round dblq towards zero */
3958 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3959 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3961 /* convert dblq to q by truncating towards zero */
3962 if (dblq < 0.0)
3963 q = (signed long long int)(-dblq);
3964 else
3965 q = (signed long long int)dblq;
3967 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3968 /* (C0,C3,C1) <-- (q2,q1,q0) */
3969 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3970 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3971 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3972 } else {
3973 int N = 32 + (expdif % 32); /* as per AMD docs */
3974 env->fpus |= 0x400; /* C2 <-- 1 */
3975 fptemp = pow(2.0, (double)(expdif - N));
3976 fpsrcop = (ST0 / ST1) / fptemp;
3977 /* fpsrcop = integer obtained by chopping */
3978 fpsrcop = (fpsrcop < 0.0) ?
3979 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3980 ST0 -= (ST1 * fpsrcop * fptemp);
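/* Illustrative sketch (not in the original source): when fprem/fprem1
   complete (C2 = 0) they report the three low quotient bits in the status
   word as C0 = q2 (bit 8), C3 = q1 (bit 14) and C1 = q0 (bit 9), which is
   what the shift expressions above encode.  The demo function name is
   hypothetical. */
#if 0
static int demo_fprem_quotient_bits(int q)
{
    int fpus = 0;

    fpus |= (q & 0x4) << (8 - 2);   /* C0 <-- q2 */
    fpus |= (q & 0x2) << (14 - 1);  /* C3 <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);   /* C1 <-- q0 */
    /* e.g. q = 5 (binary 101) sets C0 and C1: fpus == 0x0300 */
    return fpus;
}
#endif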
3984 void helper_fyl2xp1(void)
3986 CPU86_LDouble fptemp;
3988 fptemp = ST0;
3989 if ((fptemp+1.0)>0.0) {
3990 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3991 ST1 *= fptemp;
3992 fpop();
3993 } else {
3994 env->fpus &= (~0x4700);
3995 env->fpus |= 0x400;
3999 void helper_fsqrt(void)
4001 CPU86_LDouble fptemp;
4003 fptemp = ST0;
4004 if (fptemp<0.0) {
4005 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4006 env->fpus |= 0x400;
4008 ST0 = sqrt(fptemp);
4011 void helper_fsincos(void)
4013 CPU86_LDouble fptemp;
4015 fptemp = ST0;
4016 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4017 env->fpus |= 0x400;
4018 } else {
4019 ST0 = sin(fptemp);
4020 fpush();
4021 ST0 = cos(fptemp);
4022 env->fpus &= (~0x400); /* C2 <-- 0 */
4023 /* the above code is for |arg| < 2**63 only */
4027 void helper_frndint(void)
4029 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4032 void helper_fscale(void)
4034 ST0 = ldexp (ST0, (int)(ST1));
4037 void helper_fsin(void)
4039 CPU86_LDouble fptemp;
4041 fptemp = ST0;
4042 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4043 env->fpus |= 0x400;
4044 } else {
4045 ST0 = sin(fptemp);
4046 env->fpus &= (~0x400); /* C2 <-- 0 */
4047 /* the above code is for |arg| < 2**53 only */
4051 void helper_fcos(void)
4053 CPU86_LDouble fptemp;
4055 fptemp = ST0;
4056 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4057 env->fpus |= 0x400;
4058 } else {
4059 ST0 = cos(fptemp);
4060 env->fpus &= (~0x400); /* C2 <-- 0 */
4061 /* the above code is for |arg| < 2**63 only */
4065 void helper_fxam_ST0(void)
4067 CPU86_LDoubleU temp;
4068 int expdif;
4070 temp.d = ST0;
4072 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4073 if (SIGND(temp))
4074 env->fpus |= 0x200; /* C1 <-- 1 */
4076 /* XXX: test fptags too */
4077 expdif = EXPD(temp);
4078 if (expdif == MAXEXPD) {
4079 #ifdef USE_X86LDOUBLE
4080 if (MANTD(temp) == 0x8000000000000000ULL)
4081 #else
4082 if (MANTD(temp) == 0)
4083 #endif
4084 env->fpus |= 0x500 /*Infinity*/;
4085 else
4086 env->fpus |= 0x100 /*NaN*/;
4087 } else if (expdif == 0) {
4088 if (MANTD(temp) == 0)
4089 env->fpus |= 0x4000 /*Zero*/;
4090 else
4091 env->fpus |= 0x4400 /*Denormal*/;
4092 } else {
4093 env->fpus |= 0x400;
4097 void helper_fstenv(target_ulong ptr, int data32)
4099 int fpus, fptag, exp, i;
4100 uint64_t mant;
4101 CPU86_LDoubleU tmp;
4103 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4104 fptag = 0;
4105 for (i=7; i>=0; i--) {
4106 fptag <<= 2;
4107 if (env->fptags[i]) {
4108 fptag |= 3;
4109 } else {
4110 tmp.d = env->fpregs[i].d;
4111 exp = EXPD(tmp);
4112 mant = MANTD(tmp);
4113 if (exp == 0 && mant == 0) {
4114 /* zero */
4115 fptag |= 1;
4116 } else if (exp == 0 || exp == MAXEXPD
4117 #ifdef USE_X86LDOUBLE
4118 || (mant & (1LL << 63)) == 0
4119 #endif
4121 /* NaNs, infinity, denormal */
4122 fptag |= 2;
4126 if (data32) {
4127 /* 32 bit */
4128 stl(ptr, env->fpuc);
4129 stl(ptr + 4, fpus);
4130 stl(ptr + 8, fptag);
4131 stl(ptr + 12, 0); /* fpip */
4132 stl(ptr + 16, 0); /* fpcs */
4133 stl(ptr + 20, 0); /* fpoo */
4134 stl(ptr + 24, 0); /* fpos */
4135 } else {
4136 /* 16 bit */
4137 stw(ptr, env->fpuc);
4138 stw(ptr + 2, fpus);
4139 stw(ptr + 4, fptag);
4140 stw(ptr + 6, 0);
4141 stw(ptr + 8, 0);
4142 stw(ptr + 10, 0);
4143 stw(ptr + 12, 0);
4147 void helper_fldenv(target_ulong ptr, int data32)
4149 int i, fpus, fptag;
4151 if (data32) {
4152 env->fpuc = lduw(ptr);
4153 fpus = lduw(ptr + 4);
4154 fptag = lduw(ptr + 8);
4156 else {
4157 env->fpuc = lduw(ptr);
4158 fpus = lduw(ptr + 2);
4159 fptag = lduw(ptr + 4);
4161 env->fpstt = (fpus >> 11) & 7;
4162 env->fpus = fpus & ~0x3800;
4163 for(i = 0;i < 8; i++) {
4164 env->fptags[i] = ((fptag & 3) == 3);
4165 fptag >>= 2;
4169 void helper_fsave(target_ulong ptr, int data32)
4171 CPU86_LDouble tmp;
4172 int i;
4174 helper_fstenv(ptr, data32);
4176 ptr += (14 << data32);
4177 for(i = 0;i < 8; i++) {
4178 tmp = ST(i);
4179 helper_fstt(tmp, ptr);
4180 ptr += 10;
4183 /* fninit */
4184 env->fpus = 0;
4185 env->fpstt = 0;
4186 env->fpuc = 0x37f;
4187 env->fptags[0] = 1;
4188 env->fptags[1] = 1;
4189 env->fptags[2] = 1;
4190 env->fptags[3] = 1;
4191 env->fptags[4] = 1;
4192 env->fptags[5] = 1;
4193 env->fptags[6] = 1;
4194 env->fptags[7] = 1;
4197 void helper_frstor(target_ulong ptr, int data32)
4199 CPU86_LDouble tmp;
4200 int i;
4202 helper_fldenv(ptr, data32);
4203 ptr += (14 << data32);
4205 for(i = 0;i < 8; i++) {
4206 tmp = helper_fldt(ptr);
4207 ST(i) = tmp;
4208 ptr += 10;
4212 void helper_fxsave(target_ulong ptr, int data64)
4214 int fpus, fptag, i, nb_xmm_regs;
4215 CPU86_LDouble tmp;
4216 target_ulong addr;
4218 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4219 fptag = 0;
4220 for(i = 0; i < 8; i++) {
4221 fptag |= (env->fptags[i] << i);
4223 stw(ptr, env->fpuc);
4224 stw(ptr + 2, fpus);
4225 stw(ptr + 4, fptag ^ 0xff);
4226 #ifdef TARGET_X86_64
4227 if (data64) {
4228 stq(ptr + 0x08, 0); /* rip */
4229 stq(ptr + 0x10, 0); /* rdp */
4230 } else
4231 #endif
4233 stl(ptr + 0x08, 0); /* eip */
4234 stl(ptr + 0x0c, 0); /* sel */
4235 stl(ptr + 0x10, 0); /* dp */
4236 stl(ptr + 0x14, 0); /* sel */
4239 addr = ptr + 0x20;
4240 for(i = 0;i < 8; i++) {
4241 tmp = ST(i);
4242 helper_fstt(tmp, addr);
4243 addr += 16;
4246 if (env->cr[4] & CR4_OSFXSR_MASK) {
4247 /* XXX: finish it */
4248 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4249 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4250 if (env->hflags & HF_CS64_MASK)
4251 nb_xmm_regs = 16;
4252 else
4253 nb_xmm_regs = 8;
4254 addr = ptr + 0xa0;
4255 for(i = 0; i < nb_xmm_regs; i++) {
4256 stq(addr, env->xmm_regs[i].XMM_Q(0));
4257 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4258 addr += 16;
4263 void helper_fxrstor(target_ulong ptr, int data64)
4265 int i, fpus, fptag, nb_xmm_regs;
4266 CPU86_LDouble tmp;
4267 target_ulong addr;
4269 env->fpuc = lduw(ptr);
4270 fpus = lduw(ptr + 2);
4271 fptag = lduw(ptr + 4);
4272 env->fpstt = (fpus >> 11) & 7;
4273 env->fpus = fpus & ~0x3800;
4274 fptag ^= 0xff;
4275 for(i = 0;i < 8; i++) {
4276 env->fptags[i] = ((fptag >> i) & 1);
4279 addr = ptr + 0x20;
4280 for(i = 0;i < 8; i++) {
4281 tmp = helper_fldt(addr);
4282 ST(i) = tmp;
4283 addr += 16;
4286 if (env->cr[4] & CR4_OSFXSR_MASK) {
4287 /* XXX: finish it */
4288 env->mxcsr = ldl(ptr + 0x18);
4289 //ldl(ptr + 0x1c);
4290 if (env->hflags & HF_CS64_MASK)
4291 nb_xmm_regs = 16;
4292 else
4293 nb_xmm_regs = 8;
4294 addr = ptr + 0xa0;
4295 for(i = 0; i < nb_xmm_regs; i++) {
4296 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4297 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4298 addr += 16;
4303 #ifndef USE_X86LDOUBLE
4305 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4307 CPU86_LDoubleU temp;
4308 int e;
4310 temp.d = f;
4311 /* mantissa */
4312 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4313 /* exponent + sign */
4314 e = EXPD(temp) - EXPBIAS + 16383;
4315 e |= SIGND(temp) >> 16;
4316 *pexp = e;
4319 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4321 CPU86_LDoubleU temp;
4322 int e;
4323 uint64_t ll;
4325 /* XXX: handle overflow ? */
4326 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4327 e |= (upper >> 4) & 0x800; /* sign */
4328 ll = (mant >> 11) & ((1LL << 52) - 1);
4329 #ifdef __arm__
4330 temp.l.upper = (e << 20) | (ll >> 32);
4331 temp.l.lower = ll;
4332 #else
4333 temp.ll = ll | ((uint64_t)e << 52);
4334 #endif
4335 return temp.d;
4338 #else
4340 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4342 CPU86_LDoubleU temp;
4344 temp.d = f;
4345 *pmant = temp.l.lower;
4346 *pexp = temp.l.upper;
4349 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4351 CPU86_LDoubleU temp;
4353 temp.l.upper = upper;
4354 temp.l.lower = mant;
4355 return temp.d;
4357 #endif
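/* Illustrative usage sketch (not in the original source): either variant of
   cpu_get_fp80() yields the raw 80-bit layout -- a 64-bit significand with
   an explicit integer bit and a 15-bit exponent biased by 16383.  The demo
   function name is hypothetical. */
#if 0
static void demo_fp80_of_one(void)
{
    uint64_t mant;
    uint16_t exp;

    cpu_get_fp80(&mant, &exp, 1.0);
    /* expected: mant == 0x8000000000000000ULL (integer bit set, fraction 0),
       exp == 0x3fff (bias 16383, sign 0) */
}
#endif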
4359 #ifdef TARGET_X86_64
4361 //#define DEBUG_MULDIV
4363 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4365 *plow += a;
4366 /* carry test */
4367 if (*plow < a)
4368 (*phigh)++;
4369 *phigh += b;
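/* Illustrative note (not in the original source): add128() detects the carry
   out of the low word with the standard unsigned-wraparound test -- after
   *plow += a, a carry occurred iff the sum is smaller than the addend.  A
   minimal standalone form with a hypothetical name: */
#if 0
static int demo_add_carry(uint64_t *sum, uint64_t a)
{
    *sum += a;
    return *sum < a;    /* e.g. 0xffffffffffffffffULL + 1 wraps to 0, carry = 1 */
}
#endif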
4372 static void neg128(uint64_t *plow, uint64_t *phigh)
4374 *plow = ~ *plow;
4375 *phigh = ~ *phigh;
4376 add128(plow, phigh, 1, 0);
4379 /* return TRUE if overflow */
4380 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4382 uint64_t q, r, a1, a0;
4383 int i, qb, ab;
4385 a0 = *plow;
4386 a1 = *phigh;
4387 if (a1 == 0) {
4388 q = a0 / b;
4389 r = a0 % b;
4390 *plow = q;
4391 *phigh = r;
4392 } else {
4393 if (a1 >= b)
4394 return 1;
4395 /* XXX: use a better algorithm */
4396 for(i = 0; i < 64; i++) {
4397 ab = a1 >> 63;
4398 a1 = (a1 << 1) | (a0 >> 63);
4399 if (ab || a1 >= b) {
4400 a1 -= b;
4401 qb = 1;
4402 } else {
4403 qb = 0;
4405 a0 = (a0 << 1) | qb;
4407 #if defined(DEBUG_MULDIV)
4408 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4409 *phigh, *plow, b, a0, a1);
4410 #endif
4411 *plow = a0;
4412 *phigh = a1;
4414 return 0;
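/* Illustrative usage sketch (not in the original source): div64() divides
   the 128-bit value phigh:plow by b in place, leaving the quotient in *plow
   and the remainder in *phigh, and returns 1 when the quotient would not fit
   in 64 bits (a1 >= b).  The demo function name is hypothetical. */
#if 0
static void demo_div64_usage(void)
{
    uint64_t lo = 0, hi = 1;            /* value = 2^64 */

    if (div64(&lo, &hi, 3) == 0) {
        /* lo == 0x5555555555555555ULL (quotient), hi == 1 (remainder) */
    }

    lo = 0; hi = 5;
    if (div64(&lo, &hi, 3) != 0) {
        /* quotient >= 2^64: overflow, which helper_divq_EAX below turns
           into a divide-error exception */
    }
}
#endif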
4417 /* return TRUE if overflow */
4418 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4420 int sa, sb;
4421 sa = ((int64_t)*phigh < 0);
4422 if (sa)
4423 neg128(plow, phigh);
4424 sb = (b < 0);
4425 if (sb)
4426 b = -b;
4427 if (div64(plow, phigh, b) != 0)
4428 return 1;
4429 if (sa ^ sb) {
4430 if (*plow > (1ULL << 63))
4431 return 1;
4432 *plow = - *plow;
4433 } else {
4434 if (*plow >= (1ULL << 63))
4435 return 1;
4437 if (sa)
4438 *phigh = - *phigh;
4439 return 0;
4442 void helper_mulq_EAX_T0(target_ulong t0)
4444 uint64_t r0, r1;
4446 mulu64(&r0, &r1, EAX, t0);
4447 EAX = r0;
4448 EDX = r1;
4449 CC_DST = r0;
4450 CC_SRC = r1;
4453 void helper_imulq_EAX_T0(target_ulong t0)
4455 uint64_t r0, r1;
4457 muls64(&r0, &r1, EAX, t0);
4458 EAX = r0;
4459 EDX = r1;
4460 CC_DST = r0;
4461 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4464 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4466 uint64_t r0, r1;
4468 muls64(&r0, &r1, t0, t1);
4469 CC_DST = r0;
4470 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4471 return r0;
4474 void helper_divq_EAX(target_ulong t0)
4476 uint64_t r0, r1;
4477 if (t0 == 0) {
4478 raise_exception(EXCP00_DIVZ);
4480 r0 = EAX;
4481 r1 = EDX;
4482 if (div64(&r0, &r1, t0))
4483 raise_exception(EXCP00_DIVZ);
4484 EAX = r0;
4485 EDX = r1;
4488 void helper_idivq_EAX(target_ulong t0)
4490 uint64_t r0, r1;
4491 if (t0 == 0) {
4492 raise_exception(EXCP00_DIVZ);
4494 r0 = EAX;
4495 r1 = EDX;
4496 if (idiv64(&r0, &r1, t0))
4497 raise_exception(EXCP00_DIVZ);
4498 EAX = r0;
4499 EDX = r1;
4501 #endif
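/* HLT: clear a pending STI interrupt shadow, mark the CPU halted and leave
   the execution loop until an interrupt arrives */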
4503 static void do_hlt(void)
4505 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4506 env->halted = 1;
4507 env->exception_index = EXCP_HLT;
4508 cpu_loop_exit();
4511 void helper_hlt(int next_eip_addend)
4513 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4514 EIP += next_eip_addend;
4516 do_hlt();
4519 void helper_monitor(target_ulong ptr)
4521 if ((uint32_t)ECX != 0)
4522 raise_exception(EXCP0D_GPF);
4523 /* XXX: store the address? */
4524 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
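/* MWAIT: with more than one CPU this is a NOP (the monitored address is not
   tracked), otherwise it simply behaves like HLT */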
4527 void helper_mwait(int next_eip_addend)
4529 if ((uint32_t)ECX != 0)
4530 raise_exception(EXCP0D_GPF);
4531 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4532 EIP += next_eip_addend;
4534 /* XXX: not complete but not completely erroneous */
4535 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4536 /* more than one CPU: do not sleep because another CPU may
4537 wake this one */
4538 } else {
4539 do_hlt();
4543 void helper_debug(void)
4545 env->exception_index = EXCP_DEBUG;
4546 cpu_loop_exit();
4549 void helper_raise_interrupt(int intno, int next_eip_addend)
4551 raise_interrupt(intno, 1, 0, next_eip_addend);
4554 void helper_raise_exception(int exception_index)
4556 raise_exception(exception_index);
4559 void helper_cli(void)
4561 env->eflags &= ~IF_MASK;
4564 void helper_sti(void)
4566 env->eflags |= IF_MASK;
4569 #if 0
4570 /* vm86plus instructions */
4571 void helper_cli_vm(void)
4573 env->eflags &= ~VIF_MASK;
4576 void helper_sti_vm(void)
4578 env->eflags |= VIF_MASK;
4579 if (env->eflags & VIP_MASK) {
4580 raise_exception(EXCP0D_GPF);
4583 #endif
4585 void helper_set_inhibit_irq(void)
4587 env->hflags |= HF_INHIBIT_IRQ_MASK;
4590 void helper_reset_inhibit_irq(void)
4592 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
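/* BOUND: raise #BR (EXCP05) if the signed index lies outside the
   [low, high] pair read from memory */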
4595 void helper_boundw(target_ulong a0, int v)
4597 int low, high;
4598 low = ldsw(a0);
4599 high = ldsw(a0 + 2);
4600 v = (int16_t)v;
4601 if (v < low || v > high) {
4602 raise_exception(EXCP05_BOUND);
4606 void helper_boundl(target_ulong a0, int v)
4608 int low, high;
4609 low = ldl(a0);
4610 high = ldl(a0 + 4);
4611 if (v < low || v > high) {
4612 raise_exception(EXCP05_BOUND);
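/* SSE reciprocal and reciprocal square root estimates, computed here in full
   host precision rather than the reduced-precision approximation real
   hardware returns */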
4616 static float approx_rsqrt(float a)
4618 return 1.0 / sqrt(a);
4621 static float approx_rcp(float a)
4623 return 1.0 / a;
4626 #if !defined(CONFIG_USER_ONLY)
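/* instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte accesses
   (SHIFT is the log2 of the access size) */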
4628 #define MMUSUFFIX _mmu
4630 #define SHIFT 0
4631 #include "softmmu_template.h"
4633 #define SHIFT 1
4634 #include "softmmu_template.h"
4636 #define SHIFT 2
4637 #include "softmmu_template.h"
4639 #define SHIFT 3
4640 #include "softmmu_template.h"
4642 #endif
4644 #if !defined(CONFIG_USER_ONLY)
4645 /* try to fill the TLB and raise an exception on error. If retaddr is
4646 NULL, the function was called from C code (i.e. not from generated
4647 code or from helper.c) */
4648 /* XXX: fix it to restore all registers */
4649 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4651 TranslationBlock *tb;
4652 int ret;
4653 unsigned long pc;
4654 CPUX86State *saved_env;
4656 /* XXX: hack to restore env in all cases, even if not called from
4657 generated code */
4658 saved_env = env;
4659 env = cpu_single_env;
4661 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4662 if (ret) {
4663 if (retaddr) {
4664 /* now we have a real cpu fault */
4665 pc = (unsigned long)retaddr;
4666 tb = tb_find_pc(pc);
4667 if (tb) {
4668 /* the PC is inside the translated code, which means the virtual CPU
4669 really faulted */
4670 cpu_restore_state(tb, env, pc, NULL);
4673 raise_exception_err(env->exception_index, env->error_code);
4675 env = saved_env;
4677 #endif
4679 /* Secure Virtual Machine helpers */
4681 #if defined(CONFIG_USER_ONLY)
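/* in user-mode emulation the SVM helpers are empty stubs */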
4683 void helper_vmrun(int aflag, int next_eip_addend)
4686 void helper_vmmcall(void)
4689 void helper_vmload(int aflag)
4692 void helper_vmsave(int aflag)
4695 void helper_stgi(void)
4698 void helper_clgi(void)
4701 void helper_skinit(void)
4704 void helper_invlpga(int aflag)
4707 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4710 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4714 void helper_svm_check_io(uint32_t port, uint32_t param,
4715 uint32_t next_eip_addend)
4718 #else
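/* copy a segment register to/from the vmcb_seg layout; the attrib field
   packs the descriptor flags, hence the shifting */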
4720 static inline void svm_save_seg(target_phys_addr_t addr,
4721 const SegmentCache *sc)
4723 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4724 sc->selector);
4725 stq_phys(addr + offsetof(struct vmcb_seg, base),
4726 sc->base);
4727 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4728 sc->limit);
4729 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4730 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4733 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4735 unsigned int flags;
4737 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4738 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4739 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4740 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4741 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4744 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4745 CPUState *env, int seg_reg)
4747 SegmentCache sc1, *sc = &sc1;
4748 svm_load_seg(addr, sc);
4749 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4750 sc->base, sc->limit, sc->flags);
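/* VMRUN: save the host state into the hsave area, then load the guest state,
   intercept bitmaps and any pending event injection from the VMCB */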
4753 void helper_vmrun(int aflag, int next_eip_addend)
4755 target_ulong addr;
4756 uint32_t event_inj;
4757 uint32_t int_ctl;
4759 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4761 if (aflag == 2)
4762 addr = EAX;
4763 else
4764 addr = (uint32_t)EAX;
4766 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4768 env->vm_vmcb = addr;
4770 /* save the current CPU state in the hsave page */
4771 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4772 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4774 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4775 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4777 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4778 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4779 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4780 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4781 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4782 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4784 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4785 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4787 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4788 &env->segs[R_ES]);
4789 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4790 &env->segs[R_CS]);
4791 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4792 &env->segs[R_SS]);
4793 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4794 &env->segs[R_DS]);
4796 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4797 EIP + next_eip_addend);
4798 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4799 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4801 /* load the interception bitmaps so we do not need to access the
4802 vmcb in svm mode */
4803 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4804 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4805 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4806 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4807 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4808 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4810 /* enable intercepts */
4811 env->hflags |= HF_SVMI_MASK;
4813 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4815 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4816 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4818 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4819 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4821 /* clear exit_info_2 so we behave like the real hardware */
4822 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4824 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4825 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4826 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4827 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4828 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4829 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4830 if (int_ctl & V_INTR_MASKING_MASK) {
4831 env->v_tpr = int_ctl & V_TPR_MASK;
4832 env->hflags2 |= HF2_VINTR_MASK;
4833 if (env->eflags & IF_MASK)
4834 env->hflags2 |= HF2_HIF_MASK;
4837 cpu_load_efer(env,
4838 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4839 env->eflags = 0;
4840 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4841 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4842 CC_OP = CC_OP_EFLAGS;
4844 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4845 env, R_ES);
4846 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4847 env, R_CS);
4848 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4849 env, R_SS);
4850 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4851 env, R_DS);
4853 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4854 env->eip = EIP;
4855 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4856 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4857 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4858 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4859 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4861 /* FIXME: guest state consistency checks */
4863 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4864 case TLB_CONTROL_DO_NOTHING:
4865 break;
4866 case TLB_CONTROL_FLUSH_ALL_ASID:
4867 /* FIXME: this is not 100% correct but should work for now */
4868 tlb_flush(env, 1);
4869 break;
4872 env->hflags2 |= HF2_GIF_MASK;
4874 if (int_ctl & V_IRQ_MASK) {
4875 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4878 /* inject an event from the VMCB if one is pending */
4879 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4880 if (event_inj & SVM_EVTINJ_VALID) {
4881 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4882 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4883 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4884 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4886 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4887 /* FIXME: need to implement valid_err */
4888 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4889 case SVM_EVTINJ_TYPE_INTR:
4890 env->exception_index = vector;
4891 env->error_code = event_inj_err;
4892 env->exception_is_int = 0;
4893 env->exception_next_eip = -1;
4894 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4895 /* XXX: is this always correct? */
4896 do_interrupt(vector, 0, 0, 0, 1);
4897 break;
4898 case SVM_EVTINJ_TYPE_NMI:
4899 env->exception_index = EXCP02_NMI;
4900 env->error_code = event_inj_err;
4901 env->exception_is_int = 0;
4902 env->exception_next_eip = EIP;
4903 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
4904 cpu_loop_exit();
4905 break;
4906 case SVM_EVTINJ_TYPE_EXEPT:
4907 env->exception_index = vector;
4908 env->error_code = event_inj_err;
4909 env->exception_is_int = 0;
4910 env->exception_next_eip = -1;
4911 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
4912 cpu_loop_exit();
4913 break;
4914 case SVM_EVTINJ_TYPE_SOFT:
4915 env->exception_index = vector;
4916 env->error_code = event_inj_err;
4917 env->exception_is_int = 1;
4918 env->exception_next_eip = EIP;
4919 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
4920 cpu_loop_exit();
4921 break;
4923 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
4927 void helper_vmmcall(void)
4929 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4930 raise_exception(EXCP06_ILLOP);
4933 void helper_vmload(int aflag)
4935 target_ulong addr;
4936 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4938 if (aflag == 2)
4939 addr = EAX;
4940 else
4941 addr = (uint32_t)EAX;
4943 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4944 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4945 env->segs[R_FS].base);
4947 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4948 env, R_FS);
4949 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4950 env, R_GS);
4951 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4952 &env->tr);
4953 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4954 &env->ldt);
4956 #ifdef TARGET_X86_64
4957 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4958 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4959 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4960 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4961 #endif
4962 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4963 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4964 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4965 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4968 void helper_vmsave(int aflag)
4970 target_ulong addr;
4971 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
4973 if (aflag == 2)
4974 addr = EAX;
4975 else
4976 addr = (uint32_t)EAX;
4978 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4979 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4980 env->segs[R_FS].base);
4982 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
4983 &env->segs[R_FS]);
4984 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
4985 &env->segs[R_GS]);
4986 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
4987 &env->tr);
4988 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
4989 &env->ldt);
4991 #ifdef TARGET_X86_64
4992 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4993 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4994 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4995 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4996 #endif
4997 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4998 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4999 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5000 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5003 void helper_stgi(void)
5005 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5006 env->hflags2 |= HF2_GIF_MASK;
5009 void helper_clgi(void)
5011 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5012 env->hflags2 &= ~HF2_GIF_MASK;
5015 void helper_skinit(void)
5017 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5018 /* XXX: not implemented */
5019 raise_exception(EXCP06_ILLOP);
5022 void helper_invlpga(int aflag)
5024 target_ulong addr;
5025 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5027 if (aflag == 2)
5028 addr = EAX;
5029 else
5030 addr = (uint32_t)EAX;
5032 /* XXX: could use the ASID to decide whether the flush is
5033 actually needed */
5034 tlb_flush_page(env, addr);
5037 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5039 if (likely(!(env->hflags & HF_SVMI_MASK)))
5040 return;
5041 switch(type) {
5042 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5043 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5044 helper_vmexit(type, param);
5046 break;
5047 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5048 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5049 helper_vmexit(type, param);
5051 break;
5052 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5053 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5054 helper_vmexit(type, param);
5056 break;
5057 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5058 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5059 helper_vmexit(type, param);
5061 break;
5062 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5063 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5064 helper_vmexit(type, param);
5066 break;
5067 case SVM_EXIT_MSR:
5068 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5069 /* FIXME: this should be read in at vmrun (faster this way?) */
5070 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5071 uint32_t t0, t1;
5072 switch((uint32_t)ECX) {
5073 case 0 ... 0x1fff:
5074 t0 = (ECX * 2) % 8;
5075 t1 = ECX / 8;
5076 break;
5077 case 0xc0000000 ... 0xc0001fff:
5078 t0 = (8192 + ECX - 0xc0000000) * 2;
5079 t1 = (t0 / 8);
5080 t0 %= 8;
5081 break;
5082 case 0xc0010000 ... 0xc0011fff:
5083 t0 = (16384 + ECX - 0xc0010000) * 2;
5084 t1 = (t0 / 8);
5085 t0 %= 8;
5086 break;
5087 default:
5088 helper_vmexit(type, param);
5089 t0 = 0;
5090 t1 = 0;
5091 break;
5093 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5094 helper_vmexit(type, param);
5096 break;
5097 default:
5098 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5099 helper_vmexit(type, param);
5101 break;
5105 void helper_svm_check_io(uint32_t port, uint32_t param,
5106 uint32_t next_eip_addend)
5108 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5109 /* FIXME: this should be read in at vmrun (faster this way?) */
5110 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5111 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5112 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5113 /* next EIP */
5114 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5115 env->eip + next_eip_addend);
5116 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5121 /* Note: currently only 32 bits of exit_code are used */
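/* #VMEXIT: write the guest state and exit information back to the VMCB,
   then reload the host state from the hsave area */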
5122 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5124 uint32_t int_ctl;
5126 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5127 exit_code, exit_info_1,
5128 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5129 EIP);
5131 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5132 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5133 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5134 } else {
5135 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5138 /* Save the VM state in the vmcb */
5139 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5140 &env->segs[R_ES]);
5141 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5142 &env->segs[R_CS]);
5143 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5144 &env->segs[R_SS]);
5145 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5146 &env->segs[R_DS]);
5148 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5149 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5151 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5152 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5154 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5155 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5156 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5157 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5158 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5160 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5161 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5162 int_ctl |= env->v_tpr & V_TPR_MASK;
5163 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5164 int_ctl |= V_IRQ_MASK;
5165 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5167 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5168 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5169 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5170 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5171 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5172 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5173 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5175 /* Reload the host state from vm_hsave */
5176 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5177 env->hflags &= ~HF_SVMI_MASK;
5178 env->intercept = 0;
5179 env->intercept_exceptions = 0;
5180 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5181 env->tsc_offset = 0;
5183 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5184 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5186 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5187 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5189 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5190 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5191 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5192 /* EFER must be loaded after the control registers so that the hidden
5193 flags are recomputed correctly */
5194 cpu_load_efer(env,
5195 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5196 env->eflags = 0;
5197 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5198 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5199 CC_OP = CC_OP_EFLAGS;
5201 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5202 env, R_ES);
5203 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5204 env, R_CS);
5205 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5206 env, R_SS);
5207 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5208 env, R_DS);
5210 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5211 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5212 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5214 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5215 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5217 /* other setup */
5218 cpu_x86_set_cpl(env, 0);
5219 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5220 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5222 env->hflags2 &= ~HF2_GIF_MASK;
5223 /* FIXME: Resets the current ASID register to zero (host ASID). */
5225 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5227 /* Clears the TSC_OFFSET inside the processor. */
5229 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5230 from the page table indicated by the host's CR3. If the PDPEs contain
5231 illegal state, the processor causes a shutdown. */
5233 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5234 env->cr[0] |= CR0_PE_MASK;
5235 env->eflags &= ~VM_MASK;
5237 /* Disables all breakpoints in the host DR7 register. */
5239 /* Checks the reloaded host state for consistency. */
5241 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5242 host's code segment or non-canonical (in the case of long mode), a
5243 #GP fault is delivered inside the host. */
5245 /* remove any pending exception */
5246 env->exception_index = -1;
5247 env->error_code = 0;
5248 env->old_exception = -1;
5250 cpu_loop_exit();
5253 #endif
5255 /* MMX/SSE */
5256 /* XXX: optimize by storing fptt and fptags in the static cpu state */
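/* helper_enter_mmx resets the FPU stack top and marks all eight tags as
   valid; helper_emms marks them all empty again */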
5257 void helper_enter_mmx(void)
5259 env->fpstt = 0;
5260 *(uint32_t *)(env->fptags) = 0;
5261 *(uint32_t *)(env->fptags + 4) = 0;
5264 void helper_emms(void)
5266 /* set to empty state */
5267 *(uint32_t *)(env->fptags) = 0x01010101;
5268 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5271 /* XXX: remove this helper; it is just a 64-bit move */
5272 void helper_movq(void *d, void *s)
5274 *(uint64_t *)d = *(uint64_t *)s;
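/* instantiate the vector helpers twice (SHIFT 0 for MMX, SHIFT 1 for SSE)
   and then the size-parameterised helper templates which, among other
   things, provide the compute_all_xxx/compute_c_xxx flag routines used
   below */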
5277 #define SHIFT 0
5278 #include "ops_sse.h"
5280 #define SHIFT 1
5281 #include "ops_sse.h"
5283 #define SHIFT 0
5284 #include "helper_template.h"
5285 #undef SHIFT
5287 #define SHIFT 1
5288 #include "helper_template.h"
5289 #undef SHIFT
5291 #define SHIFT 2
5292 #include "helper_template.h"
5293 #undef SHIFT
5295 #ifdef TARGET_X86_64
5297 #define SHIFT 3
5298 #include "helper_template.h"
5299 #undef SHIFT
5301 #endif
5303 /* bit operations */
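/* BSF/BSR: index of the least/most significant set bit; the translator is
   expected to have handled a zero operand before calling these */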
5304 target_ulong helper_bsf(target_ulong t0)
5306 int count;
5307 target_ulong res;
5309 res = t0;
5310 count = 0;
5311 while ((res & 1) == 0) {
5312 count++;
5313 res >>= 1;
5315 return count;
5318 target_ulong helper_bsr(target_ulong t0)
5320 int count;
5321 target_ulong res, mask;
5323 res = t0;
5324 count = TARGET_LONG_BITS - 1;
5325 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5326 while ((res & mask) == 0) {
5327 count--;
5328 res <<= 1;
5330 return count;
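/* lazy condition codes: CC_OP records which operation last set the flags,
   and these dispatchers recompute EFLAGS (or just CF) from CC_SRC/CC_DST on
   demand */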
5334 static int compute_all_eflags(void)
5336 return CC_SRC;
5339 static int compute_c_eflags(void)
5341 return CC_SRC & CC_C;
5344 uint32_t helper_cc_compute_all(int op)
5346 switch (op) {
5347 default: /* should never happen */ return 0;
5349 case CC_OP_EFLAGS: return compute_all_eflags();
5351 case CC_OP_MULB: return compute_all_mulb();
5352 case CC_OP_MULW: return compute_all_mulw();
5353 case CC_OP_MULL: return compute_all_mull();
5355 case CC_OP_ADDB: return compute_all_addb();
5356 case CC_OP_ADDW: return compute_all_addw();
5357 case CC_OP_ADDL: return compute_all_addl();
5359 case CC_OP_ADCB: return compute_all_adcb();
5360 case CC_OP_ADCW: return compute_all_adcw();
5361 case CC_OP_ADCL: return compute_all_adcl();
5363 case CC_OP_SUBB: return compute_all_subb();
5364 case CC_OP_SUBW: return compute_all_subw();
5365 case CC_OP_SUBL: return compute_all_subl();
5367 case CC_OP_SBBB: return compute_all_sbbb();
5368 case CC_OP_SBBW: return compute_all_sbbw();
5369 case CC_OP_SBBL: return compute_all_sbbl();
5371 case CC_OP_LOGICB: return compute_all_logicb();
5372 case CC_OP_LOGICW: return compute_all_logicw();
5373 case CC_OP_LOGICL: return compute_all_logicl();
5375 case CC_OP_INCB: return compute_all_incb();
5376 case CC_OP_INCW: return compute_all_incw();
5377 case CC_OP_INCL: return compute_all_incl();
5379 case CC_OP_DECB: return compute_all_decb();
5380 case CC_OP_DECW: return compute_all_decw();
5381 case CC_OP_DECL: return compute_all_decl();
5383 case CC_OP_SHLB: return compute_all_shlb();
5384 case CC_OP_SHLW: return compute_all_shlw();
5385 case CC_OP_SHLL: return compute_all_shll();
5387 case CC_OP_SARB: return compute_all_sarb();
5388 case CC_OP_SARW: return compute_all_sarw();
5389 case CC_OP_SARL: return compute_all_sarl();
5391 #ifdef TARGET_X86_64
5392 case CC_OP_MULQ: return compute_all_mulq();
5394 case CC_OP_ADDQ: return compute_all_addq();
5396 case CC_OP_ADCQ: return compute_all_adcq();
5398 case CC_OP_SUBQ: return compute_all_subq();
5400 case CC_OP_SBBQ: return compute_all_sbbq();
5402 case CC_OP_LOGICQ: return compute_all_logicq();
5404 case CC_OP_INCQ: return compute_all_incq();
5406 case CC_OP_DECQ: return compute_all_decq();
5408 case CC_OP_SHLQ: return compute_all_shlq();
5410 case CC_OP_SARQ: return compute_all_sarq();
5411 #endif
5415 uint32_t helper_cc_compute_c(int op)
5417 switch (op) {
5418 default: /* should never happen */ return 0;
5420 case CC_OP_EFLAGS: return compute_c_eflags();
5422 case CC_OP_MULB: return compute_c_mull();
5423 case CC_OP_MULW: return compute_c_mull();
5424 case CC_OP_MULL: return compute_c_mull();
5426 case CC_OP_ADDB: return compute_c_addb();
5427 case CC_OP_ADDW: return compute_c_addw();
5428 case CC_OP_ADDL: return compute_c_addl();
5430 case CC_OP_ADCB: return compute_c_adcb();
5431 case CC_OP_ADCW: return compute_c_adcw();
5432 case CC_OP_ADCL: return compute_c_adcl();
5434 case CC_OP_SUBB: return compute_c_subb();
5435 case CC_OP_SUBW: return compute_c_subw();
5436 case CC_OP_SUBL: return compute_c_subl();
5438 case CC_OP_SBBB: return compute_c_sbbb();
5439 case CC_OP_SBBW: return compute_c_sbbw();
5440 case CC_OP_SBBL: return compute_c_sbbl();
5442 case CC_OP_LOGICB: return compute_c_logicb();
5443 case CC_OP_LOGICW: return compute_c_logicw();
5444 case CC_OP_LOGICL: return compute_c_logicl();
5446 case CC_OP_INCB: return compute_c_incl();
5447 case CC_OP_INCW: return compute_c_incl();
5448 case CC_OP_INCL: return compute_c_incl();
5450 case CC_OP_DECB: return compute_c_incl();
5451 case CC_OP_DECW: return compute_c_incl();
5452 case CC_OP_DECL: return compute_c_incl();
5454 case CC_OP_SHLB: return compute_c_shlb();
5455 case CC_OP_SHLW: return compute_c_shlw();
5456 case CC_OP_SHLL: return compute_c_shll();
5458 case CC_OP_SARB: return compute_c_sarl();
5459 case CC_OP_SARW: return compute_c_sarl();
5460 case CC_OP_SARL: return compute_c_sarl();
5462 #ifdef TARGET_X86_64
5463 case CC_OP_MULQ: return compute_c_mull();
5465 case CC_OP_ADDQ: return compute_c_addq();
5467 case CC_OP_ADCQ: return compute_c_adcq();
5469 case CC_OP_SUBQ: return compute_c_subq();
5471 case CC_OP_SBBQ: return compute_c_sbbq();
5473 case CC_OP_LOGICQ: return compute_c_logicq();
5475 case CC_OP_INCQ: return compute_c_incl();
5477 case CC_OP_DECQ: return compute_c_incl();
5479 case CC_OP_SHLQ: return compute_c_shlq();
5481 case CC_OP_SARQ: return compute_c_sarl();
5482 #endif