/* target-i386/op_helper.c */
/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

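/* parity_table[b] yields CC_P when the byte b contains an even number
   of set bits; the lazy condition code helpers index it to compute PF,
   which on x86 reflects only the low 8 bits of a result. */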
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

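/* f15rk holds the x87 constants (0, 1, pi, log10(2), ln(2), log2(e),
   log2(10)) that the FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T
   helpers load onto the FPU stack. */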
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

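/* In a 32 bit TSS the ring N stack pointers live at offset 8 * N + 4
   (ESP then SS); in a 16 bit TSS they live at offset 4 * N + 2 (SP then
   SS). That is what the (dpl * 4 + 2) << shift computation below
   selects. */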
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

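/* Hardware task switching: the outgoing context is written back into
   the current TSS, the incoming TSS is loaded into TR and the busy
   bits are updated in the GDT. For CALL-style switches the old TSS
   selector is stored as the back link and NT is set so that IRET can
   switch back; JMP and IRET clear the outgoing task's busy bit. */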
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

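/* The I/O permission check reads the bitmap offset from the word at
   0x66 in the 32 bit TSS; each port is covered by one bit, and an
   access of `size' bytes is allowed only when all `size' bits starting
   at the port number are clear. */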
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

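/* The D/B bit of the SS descriptor selects the stack size: a 32 bit
   stack updates all of ESP, a 16 bit stack only SP. On x86_64 the
   stack can also be a full 64 bits wide, hence the special-cased
   SET_ESP below. */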
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

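/* In long mode IDT entries are 16 bytes wide and may request one of
   seven Interrupt Stack Table slots; get_rsp_from_tss() reads RSP0-2
   (levels 0-2) or IST1-7 (levels 3-9) from the 64 bit TSS, where each
   entry is 8 bytes starting at offset 4. */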
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

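/* SYSCALL loads CS from bits 47:32 of the STAR MSR and SS from that
   selector + 8; in long mode the target RIP comes from LSTAR (64 bit
   caller) or CSTAR (compatibility mode), RFLAGS is masked with SFMASK
   (env->fmask), and RCX/R11 receive the return RIP and RFLAGS. */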
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

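/* SYSRET loads CS from bits 63:48 of the STAR MSR (+16, with RPL 3,
   for a 64 bit return) and SS from that selector + 8, always dropping
   to CPL 3; in long mode RIP comes from RCX and RFLAGS is restored
   from R11. */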
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

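/* Exception classes for the double fault logic: vectors 0 and 10-13
   (#DE, #TS, #NP, #SS, #GP) are "contributory". A contributory fault
   raised while delivering another contributory fault, or a page fault
   raised while delivering a page fault or a contributory fault,
   escalates to #DF; a further fault while delivering #DF is a triple
   fault, which resets a real CPU and aborts here. */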
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

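/* System Management Mode: on SMI the CPU dumps its state into the
   SMRAM save state map just below smbase + 0x10000 and restarts in a
   real-mode-like environment at CS base = smbase, EIP = 0x8000; RSM
   reloads the saved state. Bit 17 of the revision ID advertises
   SMBASE relocation, which helper_rsm() honours below. */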
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

void helper_aam(int base)
{
    int al, ah;

    /* AAM with a zero immediate divides by zero: raise #DE (the
       original code had an XXX note for this missing exception) */
    if (base == 0)
        raise_exception(EXCP00_DIVZ);
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;

    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

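/* DR6 bit 14 (0x4000) is the BS (single step) status bit that a debug
   exception handler inspects to distinguish single-step traps. */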
void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            EAX = 0x00000020; /* 32 bits physical */
#else
            EAX = 0x00000024; /* 36 bits physical */
#endif
        }
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}

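/* ENTER with a nonzero nesting level copies the enclosing frame
   pointers from the old frame onto the new stack before pushing the
   new frame pointer (t1); helper_enter_level handles 16/32 bit stacks
   and helper_enter64_level the long mode case. */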
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif

2046 void helper_lldt(int selector)
2048 SegmentCache *dt;
2049 uint32_t e1, e2;
2050 int index, entry_limit;
2051 target_ulong ptr;
2053 selector &= 0xffff;
2054 if ((selector & 0xfffc) == 0) {
2055 /* XXX: NULL selector case: invalid LDT */
2056 env->ldt.base = 0;
2057 env->ldt.limit = 0;
2058 } else {
2059 if (selector & 0x4)
2060 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2061 dt = &env->gdt;
2062 index = selector & ~7;
2063 #ifdef TARGET_X86_64
2064 if (env->hflags & HF_LMA_MASK)
2065 entry_limit = 15;
2066 else
2067 #endif
2068 entry_limit = 7;
2069 if ((index + entry_limit) > dt->limit)
2070 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2071 ptr = dt->base + index;
2072 e1 = ldl_kernel(ptr);
2073 e2 = ldl_kernel(ptr + 4);
2074 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2075 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2076 if (!(e2 & DESC_P_MASK))
2077 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2078 #ifdef TARGET_X86_64
2079 if (env->hflags & HF_LMA_MASK) {
2080 uint32_t e3;
2081 e3 = ldl_kernel(ptr + 8);
2082 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2083 env->ldt.base |= (target_ulong)e3 << 32;
2084 } else
2085 #endif
2087 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2090 env->ldt.selector = selector;
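/* Annotation (illustrative): e1/e2 are the two little-endian words of
 * the descriptor, with base and limit scattered across both. This
 * sketch mirrors what get_seg_base()/get_seg_limit() compute elsewhere
 * in the tree; desc_base()/desc_limit() are hypothetical names:
 */
#if 0
#include <stdint.h>

static uint32_t desc_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t desc_limit(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)               /* 4K granularity bit */
        limit = (limit << 12) | 0xfff;
    return limit;
}
#endif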
2093 void helper_ltr(int selector)
2095 SegmentCache *dt;
2096 uint32_t e1, e2;
2097 int index, type, entry_limit;
2098 target_ulong ptr;
2100 selector &= 0xffff;
2101 if ((selector & 0xfffc) == 0) {
2102 /* NULL selector case: invalid TR */
2103 env->tr.base = 0;
2104 env->tr.limit = 0;
2105 env->tr.flags = 0;
2106 } else {
2107 if (selector & 0x4)
2108 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2109 dt = &env->gdt;
2110 index = selector & ~7;
2111 #ifdef TARGET_X86_64
2112 if (env->hflags & HF_LMA_MASK)
2113 entry_limit = 15;
2114 else
2115 #endif
2116 entry_limit = 7;
2117 if ((index + entry_limit) > dt->limit)
2118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 ptr = dt->base + index;
2120 e1 = ldl_kernel(ptr);
2121 e2 = ldl_kernel(ptr + 4);
2122 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2123 if ((e2 & DESC_S_MASK) ||
2124 (type != 1 && type != 9))
2125 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2126 if (!(e2 & DESC_P_MASK))
2127 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2128 #ifdef TARGET_X86_64
2129 if (env->hflags & HF_LMA_MASK) {
2130 uint32_t e3, e4;
2131 e3 = ldl_kernel(ptr + 8);
2132 e4 = ldl_kernel(ptr + 12);
2133 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2134 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2135 load_seg_cache_raw_dt(&env->tr, e1, e2);
2136 env->tr.base |= (target_ulong)e3 << 32;
2137 } else
2138 #endif
2140 load_seg_cache_raw_dt(&env->tr, e1, e2);
2142 e2 |= DESC_TSS_BUSY_MASK;
2143 stl_kernel(ptr + 4, e2);
2145 env->tr.selector = selector;
2148 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2149 void helper_load_seg(int seg_reg, int selector)
2151 uint32_t e1, e2;
2152 int cpl, dpl, rpl;
2153 SegmentCache *dt;
2154 int index;
2155 target_ulong ptr;
2157 selector &= 0xffff;
2158 cpl = env->hflags & HF_CPL_MASK;
2159 if ((selector & 0xfffc) == 0) {
2160 /* null selector case */
2161 if (seg_reg == R_SS
2162 #ifdef TARGET_X86_64
2163 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2164 #endif
2166 raise_exception_err(EXCP0D_GPF, 0);
2167 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2168 } else {
2170 if (selector & 0x4)
2171 dt = &env->ldt;
2172 else
2173 dt = &env->gdt;
2174 index = selector & ~7;
2175 if ((index + 7) > dt->limit)
2176 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2177 ptr = dt->base + index;
2178 e1 = ldl_kernel(ptr);
2179 e2 = ldl_kernel(ptr + 4);
2181 if (!(e2 & DESC_S_MASK))
2182 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2183 rpl = selector & 3;
2184 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2185 if (seg_reg == R_SS) {
2186 /* must be writable segment */
2187 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2188 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2189 if (rpl != cpl || dpl != cpl)
2190 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2191 } else {
2192 /* must be readable segment */
2193 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2194 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2196 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2197 /* if not conforming code, test rights */
2198 if (dpl < cpl || dpl < rpl)
2199 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2203 if (!(e2 & DESC_P_MASK)) {
2204 if (seg_reg == R_SS)
2205 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2206 else
2207 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2210 /* set the access bit if not already set */
2211 if (!(e2 & DESC_A_MASK)) {
2212 e2 |= DESC_A_MASK;
2213 stl_kernel(ptr + 4, e2);
2216 cpu_x86_load_seg_cache(env, seg_reg, selector,
2217 get_seg_base(e1, e2),
2218 get_seg_limit(e1, e2),
2219 e2);
2220 #if 0
2221 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2222 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2223 #endif
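/* Annotation (illustrative): for non-SS data loads the checks above
 * encode the architectural rule DPL >= max(CPL, RPL), with conforming
 * code segments exempt; SS is stricter and requires DPL == CPL == RPL.
 * data_seg_access_ok() is a hypothetical restatement of the common
 * case:
 */
#if 0
static int data_seg_access_ok(int dpl, int cpl, int rpl)
{
    return dpl >= cpl && dpl >= rpl;
}
#endif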
2227 /* protected mode jump */
2228 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2229 int next_eip_addend)
2231 int gate_cs, type;
2232 uint32_t e1, e2, cpl, dpl, rpl, limit;
2233 target_ulong next_eip;
2235 if ((new_cs & 0xfffc) == 0)
2236 raise_exception_err(EXCP0D_GPF, 0);
2237 if (load_segment(&e1, &e2, new_cs) != 0)
2238 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2239 cpl = env->hflags & HF_CPL_MASK;
2240 if (e2 & DESC_S_MASK) {
2241 if (!(e2 & DESC_CS_MASK))
2242 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2243 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2244 if (e2 & DESC_C_MASK) {
2245 /* conforming code segment */
2246 if (dpl > cpl)
2247 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2248 } else {
2249 /* non conforming code segment */
2250 rpl = new_cs & 3;
2251 if (rpl > cpl)
2252 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2253 if (dpl != cpl)
2254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2256 if (!(e2 & DESC_P_MASK))
2257 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2258 limit = get_seg_limit(e1, e2);
2259 if (new_eip > limit &&
2260 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2262 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2263 get_seg_base(e1, e2), limit, e2);
2264 EIP = new_eip;
2265 } else {
2266 /* jump to call or task gate */
2267 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2268 rpl = new_cs & 3;
2269 cpl = env->hflags & HF_CPL_MASK;
2270 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2271 switch(type) {
2272 case 1: /* 286 TSS */
2273 case 9: /* 386 TSS */
2274 case 5: /* task gate */
2275 if (dpl < cpl || dpl < rpl)
2276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2277 next_eip = env->eip + next_eip_addend;
2278 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2279 CC_OP = CC_OP_EFLAGS;
2280 break;
2281 case 4: /* 286 call gate */
2282 case 12: /* 386 call gate */
2283 if ((dpl < cpl) || (dpl < rpl))
2284 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2285 if (!(e2 & DESC_P_MASK))
2286 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2287 gate_cs = e1 >> 16;
2288 new_eip = (e1 & 0xffff);
2289 if (type == 12)
2290 new_eip |= (e2 & 0xffff0000);
2291 if (load_segment(&e1, &e2, gate_cs) != 0)
2292 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2293 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2294 /* must be code segment */
2295 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2296 (DESC_S_MASK | DESC_CS_MASK)))
2297 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2298 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2299 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2300 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2301 if (!(e2 & DESC_P_MASK))
2302 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2303 limit = get_seg_limit(e1, e2);
2304 if (new_eip > limit)
2305 raise_exception_err(EXCP0D_GPF, 0);
2306 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2307 get_seg_base(e1, e2), limit, e2);
2308 EIP = new_eip;
2309 break;
2310 default:
2311 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2312 break;
2317 /* real mode call */
2318 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2319 int shift, int next_eip)
2321 int new_eip;
2322 uint32_t esp, esp_mask;
2323 target_ulong ssp;
2325 new_eip = new_eip1;
2326 esp = ESP;
2327 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2328 ssp = env->segs[R_SS].base;
2329 if (shift) {
2330 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2331 PUSHL(ssp, esp, esp_mask, next_eip);
2332 } else {
2333 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2334 PUSHW(ssp, esp, esp_mask, next_eip);
2337 SET_ESP(esp, esp_mask);
2338 env->eip = new_eip;
2339 env->segs[R_CS].selector = new_cs;
2340 env->segs[R_CS].base = (new_cs << 4);
2343 /* protected mode call */
2344 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2345 int shift, int next_eip_addend)
2347 int new_stack, i;
2348 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2349 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2350 uint32_t val, limit, old_sp_mask;
2351 target_ulong ssp, old_ssp, next_eip;
2353 next_eip = env->eip + next_eip_addend;
2354 #ifdef DEBUG_PCALL
2355 if (loglevel & CPU_LOG_PCALL) {
2356 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2357 new_cs, (uint32_t)new_eip, shift);
2358 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2360 #endif
2361 if ((new_cs & 0xfffc) == 0)
2362 raise_exception_err(EXCP0D_GPF, 0);
2363 if (load_segment(&e1, &e2, new_cs) != 0)
2364 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2365 cpl = env->hflags & HF_CPL_MASK;
2366 #ifdef DEBUG_PCALL
2367 if (loglevel & CPU_LOG_PCALL) {
2368 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2370 #endif
2371 if (e2 & DESC_S_MASK) {
2372 if (!(e2 & DESC_CS_MASK))
2373 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2374 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2375 if (e2 & DESC_C_MASK) {
2376 /* conforming code segment */
2377 if (dpl > cpl)
2378 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2379 } else {
2380 /* non conforming code segment */
2381 rpl = new_cs & 3;
2382 if (rpl > cpl)
2383 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2384 if (dpl != cpl)
2385 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387 if (!(e2 & DESC_P_MASK))
2388 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2390 #ifdef TARGET_X86_64
2391 /* XXX: check 16/32 bit cases in long mode */
2392 if (shift == 2) {
2393 target_ulong rsp;
2394 /* 64 bit case */
2395 rsp = ESP;
2396 PUSHQ(rsp, env->segs[R_CS].selector);
2397 PUSHQ(rsp, next_eip);
2398 /* from this point, not restartable */
2399 ESP = rsp;
2400 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2401 get_seg_base(e1, e2),
2402 get_seg_limit(e1, e2), e2);
2403 EIP = new_eip;
2404 } else
2405 #endif
2407 sp = ESP;
2408 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2409 ssp = env->segs[R_SS].base;
2410 if (shift) {
2411 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2412 PUSHL(ssp, sp, sp_mask, next_eip);
2413 } else {
2414 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2415 PUSHW(ssp, sp, sp_mask, next_eip);
2418 limit = get_seg_limit(e1, e2);
2419 if (new_eip > limit)
2420 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2421 /* from this point, not restartable */
2422 SET_ESP(sp, sp_mask);
2423 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2424 get_seg_base(e1, e2), limit, e2);
2425 EIP = new_eip;
2427 } else {
2428 /* check gate type */
2429 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2430 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2431 rpl = new_cs & 3;
2432 switch(type) {
2433 case 1: /* available 286 TSS */
2434 case 9: /* available 386 TSS */
2435 case 5: /* task gate */
2436 if (dpl < cpl || dpl < rpl)
2437 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2438 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2439 CC_OP = CC_OP_EFLAGS;
2440 return;
2441 case 4: /* 286 call gate */
2442 case 12: /* 386 call gate */
2443 break;
2444 default:
2445 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2446 break;
2448 shift = type >> 3;
2450 if (dpl < cpl || dpl < rpl)
2451 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2452 /* check valid bit */
2453 if (!(e2 & DESC_P_MASK))
2454 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2455 selector = e1 >> 16;
2456 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2457 param_count = e2 & 0x1f;
2458 if ((selector & 0xfffc) == 0)
2459 raise_exception_err(EXCP0D_GPF, 0);
2461 if (load_segment(&e1, &e2, selector) != 0)
2462 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2463 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2464 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2465 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2466 if (dpl > cpl)
2467 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2468 if (!(e2 & DESC_P_MASK))
2469 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2471 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2472 /* to inner privilege */
2473 get_ss_esp_from_tss(&ss, &sp, dpl);
2474 #ifdef DEBUG_PCALL
2475 if (loglevel & CPU_LOG_PCALL)
2476 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2477 ss, sp, param_count, ESP);
2478 #endif
2479 if ((ss & 0xfffc) == 0)
2480 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2481 if ((ss & 3) != dpl)
2482 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2483 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2484 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2485 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2486 if (ss_dpl != dpl)
2487 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2488 if (!(ss_e2 & DESC_S_MASK) ||
2489 (ss_e2 & DESC_CS_MASK) ||
2490 !(ss_e2 & DESC_W_MASK))
2491 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492 if (!(ss_e2 & DESC_P_MASK))
2493 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2495 // push_size = ((param_count * 2) + 8) << shift;
2497 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2498 old_ssp = env->segs[R_SS].base;
2500 sp_mask = get_sp_mask(ss_e2);
2501 ssp = get_seg_base(ss_e1, ss_e2);
2502 if (shift) {
2503 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2504 PUSHL(ssp, sp, sp_mask, ESP);
2505 for(i = param_count - 1; i >= 0; i--) {
2506 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2507 PUSHL(ssp, sp, sp_mask, val);
2509 } else {
2510 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2511 PUSHW(ssp, sp, sp_mask, ESP);
2512 for(i = param_count - 1; i >= 0; i--) {
2513 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2514 PUSHW(ssp, sp, sp_mask, val);
2517 new_stack = 1;
2518 } else {
2519 /* to same privilege */
2520 sp = ESP;
2521 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2522 ssp = env->segs[R_SS].base;
2523 // push_size = (4 << shift);
2524 new_stack = 0;
2527 if (shift) {
2528 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2529 PUSHL(ssp, sp, sp_mask, next_eip);
2530 } else {
2531 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2532 PUSHW(ssp, sp, sp_mask, next_eip);
2535 /* from this point, not restartable */
2537 if (new_stack) {
2538 ss = (ss & ~3) | dpl;
2539 cpu_x86_load_seg_cache(env, R_SS, ss,
2540 ssp,
2541 get_seg_limit(ss_e1, ss_e2),
2542 ss_e2);
2545 selector = (selector & ~3) | dpl;
2546 cpu_x86_load_seg_cache(env, R_CS, selector,
2547 get_seg_base(e1, e2),
2548 get_seg_limit(e1, e2),
2549 e2);
2550 cpu_x86_set_cpl(env, dpl);
2551 SET_ESP(sp, sp_mask);
2552 EIP = offset;
2554 #ifdef USE_KQEMU
2555 if (kqemu_is_ok(env)) {
2556 env->exception_index = -1;
2557 cpu_loop_exit();
2559 #endif
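/* Annotation (illustrative): a call gate packs the target selector in
 * e1[31:16], the entry offset split across e1[15:0] (low half) and
 * e2[31:16] (high half, 386 gates only), and the parameter count in
 * e2[4:0], exactly as the helper above extracts them. decode_call_gate()
 * is a hypothetical helper for this sketch:
 */
#if 0
#include <stdint.h>

struct call_gate {
    uint16_t selector;
    uint32_t offset;
    int      param_count;
};

static struct call_gate decode_call_gate(uint32_t e1, uint32_t e2)
{
    struct call_gate g;

    g.selector    = e1 >> 16;
    g.offset      = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    g.param_count = e2 & 0x1f;
    return g;
}
#endif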
2562 /* real and vm86 mode iret */
2563 void helper_iret_real(int shift)
2565 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2566 target_ulong ssp;
2567 int eflags_mask;
2569 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2570 sp = ESP;
2571 ssp = env->segs[R_SS].base;
2572 if (shift == 1) {
2573 /* 32 bits */
2574 POPL(ssp, sp, sp_mask, new_eip);
2575 POPL(ssp, sp, sp_mask, new_cs);
2576 new_cs &= 0xffff;
2577 POPL(ssp, sp, sp_mask, new_eflags);
2578 } else {
2579 /* 16 bits */
2580 POPW(ssp, sp, sp_mask, new_eip);
2581 POPW(ssp, sp, sp_mask, new_cs);
2582 POPW(ssp, sp, sp_mask, new_eflags);
2584 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2585 load_seg_vm(R_CS, new_cs);
2586 env->eip = new_eip;
2587 if (env->eflags & VM_MASK)
2588 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2589 else
2590 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2591 if (shift == 0)
2592 eflags_mask &= 0xffff;
2593 load_eflags(new_eflags, eflags_mask);
2594 env->hflags2 &= ~HF2_NMI_MASK;
2597 static inline void validate_seg(int seg_reg, int cpl)
2599 int dpl;
2600 uint32_t e2;
2602 /* XXX: on x86_64, we do not want to nullify FS and GS because
2603 they may still contain a valid base. I would be interested to
2604 know how a real x86_64 CPU behaves */
2605 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2606 (env->segs[seg_reg].selector & 0xfffc) == 0)
2607 return;
2609 e2 = env->segs[seg_reg].flags;
2610 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2611 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2612 /* data or non conforming code segment */
2613 if (dpl < cpl) {
2614 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2619 /* protected mode iret */
2620 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2622 uint32_t new_cs, new_eflags, new_ss;
2623 uint32_t new_es, new_ds, new_fs, new_gs;
2624 uint32_t e1, e2, ss_e1, ss_e2;
2625 int cpl, dpl, rpl, eflags_mask, iopl;
2626 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2628 #ifdef TARGET_X86_64
2629 if (shift == 2)
2630 sp_mask = -1;
2631 else
2632 #endif
2633 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2634 sp = ESP;
2635 ssp = env->segs[R_SS].base;
2636 new_eflags = 0; /* avoid warning */
2637 #ifdef TARGET_X86_64
2638 if (shift == 2) {
2639 POPQ(sp, new_eip);
2640 POPQ(sp, new_cs);
2641 new_cs &= 0xffff;
2642 if (is_iret) {
2643 POPQ(sp, new_eflags);
2645 } else
2646 #endif
2647 if (shift == 1) {
2648 /* 32 bits */
2649 POPL(ssp, sp, sp_mask, new_eip);
2650 POPL(ssp, sp, sp_mask, new_cs);
2651 new_cs &= 0xffff;
2652 if (is_iret) {
2653 POPL(ssp, sp, sp_mask, new_eflags);
2654 if (new_eflags & VM_MASK)
2655 goto return_to_vm86;
2657 } else {
2658 /* 16 bits */
2659 POPW(ssp, sp, sp_mask, new_eip);
2660 POPW(ssp, sp, sp_mask, new_cs);
2661 if (is_iret)
2662 POPW(ssp, sp, sp_mask, new_eflags);
2664 #ifdef DEBUG_PCALL
2665 if (loglevel & CPU_LOG_PCALL) {
2666 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2667 new_cs, new_eip, shift, addend);
2668 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2670 #endif
2671 if ((new_cs & 0xfffc) == 0)
2672 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2673 if (load_segment(&e1, &e2, new_cs) != 0)
2674 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2675 if (!(e2 & DESC_S_MASK) ||
2676 !(e2 & DESC_CS_MASK))
2677 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2678 cpl = env->hflags & HF_CPL_MASK;
2679 rpl = new_cs & 3;
2680 if (rpl < cpl)
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2683 if (e2 & DESC_C_MASK) {
2684 if (dpl > rpl)
2685 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2686 } else {
2687 if (dpl != rpl)
2688 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 if (!(e2 & DESC_P_MASK))
2691 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2693 sp += addend;
2694 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2695 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2696 /* return to same privilege level */
2697 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2698 get_seg_base(e1, e2),
2699 get_seg_limit(e1, e2),
2700 e2);
2701 } else {
2702 /* return to different privilege level */
2703 #ifdef TARGET_X86_64
2704 if (shift == 2) {
2705 POPQ(sp, new_esp);
2706 POPQ(sp, new_ss);
2707 new_ss &= 0xffff;
2708 } else
2709 #endif
2710 if (shift == 1) {
2711 /* 32 bits */
2712 POPL(ssp, sp, sp_mask, new_esp);
2713 POPL(ssp, sp, sp_mask, new_ss);
2714 new_ss &= 0xffff;
2715 } else {
2716 /* 16 bits */
2717 POPW(ssp, sp, sp_mask, new_esp);
2718 POPW(ssp, sp, sp_mask, new_ss);
2720 #ifdef DEBUG_PCALL
2721 if (loglevel & CPU_LOG_PCALL) {
2722 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2723 new_ss, new_esp);
2725 #endif
2726 if ((new_ss & 0xfffc) == 0) {
2727 #ifdef TARGET_X86_64
2728 /* NULL ss is allowed in long mode if cpl != 3 */
2729 /* XXX: test CS64 ? */
2730 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2731 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2732 0, 0xffffffff,
2733 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2734 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2735 DESC_W_MASK | DESC_A_MASK);
2736 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2737 } else
2738 #endif
2740 raise_exception_err(EXCP0D_GPF, 0);
2742 } else {
2743 if ((new_ss & 3) != rpl)
2744 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2745 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2746 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2747 if (!(ss_e2 & DESC_S_MASK) ||
2748 (ss_e2 & DESC_CS_MASK) ||
2749 !(ss_e2 & DESC_W_MASK))
2750 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2751 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2752 if (dpl != rpl)
2753 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2754 if (!(ss_e2 & DESC_P_MASK))
2755 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2756 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2757 get_seg_base(ss_e1, ss_e2),
2758 get_seg_limit(ss_e1, ss_e2),
2759 ss_e2);
2762 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2763 get_seg_base(e1, e2),
2764 get_seg_limit(e1, e2),
2765 e2);
2766 cpu_x86_set_cpl(env, rpl);
2767 sp = new_esp;
2768 #ifdef TARGET_X86_64
2769 if (env->hflags & HF_CS64_MASK)
2770 sp_mask = -1;
2771 else
2772 #endif
2773 sp_mask = get_sp_mask(ss_e2);
2775 /* validate data segments */
2776 validate_seg(R_ES, rpl);
2777 validate_seg(R_DS, rpl);
2778 validate_seg(R_FS, rpl);
2779 validate_seg(R_GS, rpl);
2781 sp += addend;
2783 SET_ESP(sp, sp_mask);
2784 env->eip = new_eip;
2785 if (is_iret) {
2786 /* NOTE: 'cpl' is the _old_ CPL */
2787 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2788 if (cpl == 0)
2789 eflags_mask |= IOPL_MASK;
2790 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2791 if (cpl <= iopl)
2792 eflags_mask |= IF_MASK;
2793 if (shift == 0)
2794 eflags_mask &= 0xffff;
2795 load_eflags(new_eflags, eflags_mask);
2797 return;
2799 return_to_vm86:
2800 POPL(ssp, sp, sp_mask, new_esp);
2801 POPL(ssp, sp, sp_mask, new_ss);
2802 POPL(ssp, sp, sp_mask, new_es);
2803 POPL(ssp, sp, sp_mask, new_ds);
2804 POPL(ssp, sp, sp_mask, new_fs);
2805 POPL(ssp, sp, sp_mask, new_gs);
2807 /* modify processor state */
2808 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2809 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2810 load_seg_vm(R_CS, new_cs & 0xffff);
2811 cpu_x86_set_cpl(env, 3);
2812 load_seg_vm(R_SS, new_ss & 0xffff);
2813 load_seg_vm(R_ES, new_es & 0xffff);
2814 load_seg_vm(R_DS, new_ds & 0xffff);
2815 load_seg_vm(R_FS, new_fs & 0xffff);
2816 load_seg_vm(R_GS, new_gs & 0xffff);
2818 env->eip = new_eip & 0xffff;
2819 ESP = new_esp;
2822 void helper_iret_protected(int shift, int next_eip)
2824 int tss_selector, type;
2825 uint32_t e1, e2;
2827 /* specific case for TSS */
2828 if (env->eflags & NT_MASK) {
2829 #ifdef TARGET_X86_64
2830 if (env->hflags & HF_LMA_MASK)
2831 raise_exception_err(EXCP0D_GPF, 0);
2832 #endif
2833 tss_selector = lduw_kernel(env->tr.base + 0);
2834 if (tss_selector & 4)
2835 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2836 if (load_segment(&e1, &e2, tss_selector) != 0)
2837 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2838 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2839 /* NOTE: we check both segment and busy TSS */
2840 if (type != 3)
2841 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2842 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2843 } else {
2844 helper_ret_protected(shift, 1, 0);
2846 env->hflags2 &= ~HF2_NMI_MASK;
2847 #ifdef USE_KQEMU
2848 if (kqemu_is_ok(env)) {
2849 CC_OP = CC_OP_EFLAGS;
2850 env->exception_index = -1;
2851 cpu_loop_exit();
2853 #endif
2856 void helper_lret_protected(int shift, int addend)
2858 helper_ret_protected(shift, 0, addend);
2859 #ifdef USE_KQEMU
2860 if (kqemu_is_ok(env)) {
2861 env->exception_index = -1;
2862 cpu_loop_exit();
2864 #endif
2867 void helper_sysenter(void)
2869 if (env->sysenter_cs == 0) {
2870 raise_exception_err(EXCP0D_GPF, 0);
2872 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2873 cpu_x86_set_cpl(env, 0);
2874 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2875 0, 0xffffffff,
2876 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2877 DESC_S_MASK |
2878 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2879 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2880 0, 0xffffffff,
2881 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2882 DESC_S_MASK |
2883 DESC_W_MASK | DESC_A_MASK);
2884 ESP = env->sysenter_esp;
2885 EIP = env->sysenter_eip;
2888 void helper_sysexit(void)
2890 int cpl;
2892 cpl = env->hflags & HF_CPL_MASK;
2893 if (env->sysenter_cs == 0 || cpl != 0) {
2894 raise_exception_err(EXCP0D_GPF, 0);
2896 cpu_x86_set_cpl(env, 3);
2897 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2898 0, 0xffffffff,
2899 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2900 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2901 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2902 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2903 0, 0xffffffff,
2904 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2905 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2906 DESC_W_MASK | DESC_A_MASK);
2907 ESP = ECX;
2908 EIP = EDX;
2909 #ifdef USE_KQEMU
2910 if (kqemu_is_ok(env)) {
2911 env->exception_index = -1;
2912 cpu_loop_exit();
2914 #endif
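/* Annotation: all four SYSENTER/SYSEXIT selectors are derived from the
 * single MSR_IA32_SYSENTER_CS value, as the two helpers above show:
 * kernel CS = base, kernel SS = base + 8, user CS = (base + 16) | 3,
 * user SS = (base + 24) | 3. */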
2917 #if defined(CONFIG_USER_ONLY)
2918 target_ulong helper_read_crN(int reg)
2920 return 0;
2923 void helper_write_crN(int reg, target_ulong t0)
2926 #else
2927 target_ulong helper_read_crN(int reg)
2929 target_ulong val;
2931 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2932 switch(reg) {
2933 default:
2934 val = env->cr[reg];
2935 break;
2936 case 8:
2937 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2938 val = cpu_get_apic_tpr(env);
2939 } else {
2940 val = env->v_tpr;
2942 break;
2944 return val;
2947 void helper_write_crN(int reg, target_ulong t0)
2949 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2950 switch(reg) {
2951 case 0:
2952 cpu_x86_update_cr0(env, t0);
2953 break;
2954 case 3:
2955 cpu_x86_update_cr3(env, t0);
2956 break;
2957 case 4:
2958 cpu_x86_update_cr4(env, t0);
2959 break;
2960 case 8:
2961 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2962 cpu_set_apic_tpr(env, t0);
2964 env->v_tpr = t0 & 0x0f;
2965 break;
2966 default:
2967 env->cr[reg] = t0;
2968 break;
2971 #endif
2973 void helper_lmsw(target_ulong t0)
2975 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2976 if already set to one. */
2977 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2978 helper_write_crN(0, t0);
2981 void helper_clts(void)
2983 env->cr[0] &= ~CR0_TS_MASK;
2984 env->hflags &= ~HF_TS_MASK;
2987 /* XXX: do more */
2988 void helper_movl_drN_T0(int reg, target_ulong t0)
2990 env->dr[reg] = t0;
2993 void helper_invlpg(target_ulong addr)
2995 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2996 tlb_flush_page(env, addr);
2999 void helper_rdtsc(void)
3001 uint64_t val;
3003 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3004 raise_exception(EXCP0D_GPF);
3006 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3008 val = cpu_get_tsc(env) + env->tsc_offset;
3009 EAX = (uint32_t)(val);
3010 EDX = (uint32_t)(val >> 32);
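/* Annotation (illustrative): RDTSC, like RDMSR below, splits a 64-bit
 * value across EDX:EAX; the inverse composition, as a hypothetical
 * helper, is:
 */
#if 0
#include <stdint.h>

static uint64_t edx_eax_to_u64(uint32_t edx, uint32_t eax)
{
    return ((uint64_t)edx << 32) | eax;
}
#endif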
3013 void helper_rdpmc(void)
3015 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3016 raise_exception(EXCP0D_GPF);
3018 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3020 /* currently unimplemented */
3021 raise_exception_err(EXCP06_ILLOP, 0);
3024 #if defined(CONFIG_USER_ONLY)
3025 void helper_wrmsr(void)
3029 void helper_rdmsr(void)
3032 #else
3033 void helper_wrmsr(void)
3035 uint64_t val;
3037 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3039 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3041 switch((uint32_t)ECX) {
3042 case MSR_IA32_SYSENTER_CS:
3043 env->sysenter_cs = val & 0xffff;
3044 break;
3045 case MSR_IA32_SYSENTER_ESP:
3046 env->sysenter_esp = val;
3047 break;
3048 case MSR_IA32_SYSENTER_EIP:
3049 env->sysenter_eip = val;
3050 break;
3051 case MSR_IA32_APICBASE:
3052 cpu_set_apic_base(env, val);
3053 break;
3054 case MSR_EFER:
3056 uint64_t update_mask;
3057 update_mask = 0;
3058 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3059 update_mask |= MSR_EFER_SCE;
3060 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3061 update_mask |= MSR_EFER_LME;
3062 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3063 update_mask |= MSR_EFER_FFXSR;
3064 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3065 update_mask |= MSR_EFER_NXE;
3066 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3067 update_mask |= MSR_EFER_SVME;
3068 cpu_load_efer(env, (env->efer & ~update_mask) |
3069 (val & update_mask));
3071 break;
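/* Annotation: update_mask whitelists only the EFER bits whose backing
 * CPUID features are advertised; writes to other bits are silently
 * dropped here, whereas real CPUs raise #GP on reserved EFER bits. */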
3072 case MSR_STAR:
3073 env->star = val;
3074 break;
3075 case MSR_PAT:
3076 env->pat = val;
3077 break;
3078 case MSR_VM_HSAVE_PA:
3079 env->vm_hsave = val;
3080 break;
3081 #ifdef TARGET_X86_64
3082 case MSR_LSTAR:
3083 env->lstar = val;
3084 break;
3085 case MSR_CSTAR:
3086 env->cstar = val;
3087 break;
3088 case MSR_FMASK:
3089 env->fmask = val;
3090 break;
3091 case MSR_FSBASE:
3092 env->segs[R_FS].base = val;
3093 break;
3094 case MSR_GSBASE:
3095 env->segs[R_GS].base = val;
3096 break;
3097 case MSR_KERNELGSBASE:
3098 env->kernelgsbase = val;
3099 break;
3100 #endif
3101 default:
3102 /* XXX: exception ? */
3103 break;
3107 void helper_rdmsr(void)
3109 uint64_t val;
3111 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3113 switch((uint32_t)ECX) {
3114 case MSR_IA32_SYSENTER_CS:
3115 val = env->sysenter_cs;
3116 break;
3117 case MSR_IA32_SYSENTER_ESP:
3118 val = env->sysenter_esp;
3119 break;
3120 case MSR_IA32_SYSENTER_EIP:
3121 val = env->sysenter_eip;
3122 break;
3123 case MSR_IA32_APICBASE:
3124 val = cpu_get_apic_base(env);
3125 break;
3126 case MSR_EFER:
3127 val = env->efer;
3128 break;
3129 case MSR_STAR:
3130 val = env->star;
3131 break;
3132 case MSR_PAT:
3133 val = env->pat;
3134 break;
3135 case MSR_VM_HSAVE_PA:
3136 val = env->vm_hsave;
3137 break;
3138 #ifdef TARGET_X86_64
3139 case MSR_LSTAR:
3140 val = env->lstar;
3141 break;
3142 case MSR_CSTAR:
3143 val = env->cstar;
3144 break;
3145 case MSR_FMASK:
3146 val = env->fmask;
3147 break;
3148 case MSR_FSBASE:
3149 val = env->segs[R_FS].base;
3150 break;
3151 case MSR_GSBASE:
3152 val = env->segs[R_GS].base;
3153 break;
3154 case MSR_KERNELGSBASE:
3155 val = env->kernelgsbase;
3156 break;
3157 #endif
3158 #ifdef USE_KQEMU
3159 case MSR_QPI_COMMBASE:
3160 if (env->kqemu_enabled) {
3161 val = kqemu_comm_base;
3162 } else {
3163 val = 0;
3165 break;
3166 #endif
3167 default:
3168 /* XXX: exception ? */
3169 val = 0;
3170 break;
3172 EAX = (uint32_t)(val);
3173 EDX = (uint32_t)(val >> 32);
3175 #endif
3177 target_ulong helper_lsl(target_ulong selector1)
3179 unsigned int limit;
3180 uint32_t e1, e2, eflags, selector;
3181 int rpl, dpl, cpl, type;
3183 selector = selector1 & 0xffff;
3184 eflags = cc_table[CC_OP].compute_all();
3185 if (load_segment(&e1, &e2, selector) != 0)
3186 goto fail;
3187 rpl = selector & 3;
3188 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3189 cpl = env->hflags & HF_CPL_MASK;
3190 if (e2 & DESC_S_MASK) {
3191 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3192 /* conforming */
3193 } else {
3194 if (dpl < cpl || dpl < rpl)
3195 goto fail;
3197 } else {
3198 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3199 switch(type) {
3200 case 1:
3201 case 2:
3202 case 3:
3203 case 9:
3204 case 11:
3205 break;
3206 default:
3207 goto fail;
3209 if (dpl < cpl || dpl < rpl) {
3210 fail:
3211 CC_SRC = eflags & ~CC_Z;
3212 return 0;
3215 limit = get_seg_limit(e1, e2);
3216 CC_SRC = eflags | CC_Z;
3217 return limit;
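/* Annotation: LSL (and LAR/VERR/VERW below) never fault on a bad
 * selector; they report success through ZF. The helpers emulate this
 * by recomputing eflags from the lazy condition codes and storing the
 * result, with ZF set or cleared, in CC_SRC. */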
3220 target_ulong helper_lar(target_ulong selector1)
3222 uint32_t e1, e2, eflags, selector;
3223 int rpl, dpl, cpl, type;
3225 selector = selector1 & 0xffff;
3226 eflags = cc_table[CC_OP].compute_all();
3227 if ((selector & 0xfffc) == 0)
3228 goto fail;
3229 if (load_segment(&e1, &e2, selector) != 0)
3230 goto fail;
3231 rpl = selector & 3;
3232 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3233 cpl = env->hflags & HF_CPL_MASK;
3234 if (e2 & DESC_S_MASK) {
3235 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3236 /* conforming */
3237 } else {
3238 if (dpl < cpl || dpl < rpl)
3239 goto fail;
3241 } else {
3242 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3243 switch(type) {
3244 case 1:
3245 case 2:
3246 case 3:
3247 case 4:
3248 case 5:
3249 case 9:
3250 case 11:
3251 case 12:
3252 break;
3253 default:
3254 goto fail;
3256 if (dpl < cpl || dpl < rpl) {
3257 fail:
3258 CC_SRC = eflags & ~CC_Z;
3259 return 0;
3262 CC_SRC = eflags | CC_Z;
3263 return e2 & 0x00f0ff00;
3266 void helper_verr(target_ulong selector1)
3268 uint32_t e1, e2, eflags, selector;
3269 int rpl, dpl, cpl;
3271 selector = selector1 & 0xffff;
3272 eflags = cc_table[CC_OP].compute_all();
3273 if ((selector & 0xfffc) == 0)
3274 goto fail;
3275 if (load_segment(&e1, &e2, selector) != 0)
3276 goto fail;
3277 if (!(e2 & DESC_S_MASK))
3278 goto fail;
3279 rpl = selector & 3;
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281 cpl = env->hflags & HF_CPL_MASK;
3282 if (e2 & DESC_CS_MASK) {
3283 if (!(e2 & DESC_R_MASK))
3284 goto fail;
3285 if (!(e2 & DESC_C_MASK)) {
3286 if (dpl < cpl || dpl < rpl)
3287 goto fail;
3289 } else {
3290 if (dpl < cpl || dpl < rpl) {
3291 fail:
3292 CC_SRC = eflags & ~CC_Z;
3293 return;
3296 CC_SRC = eflags | CC_Z;
3299 void helper_verw(target_ulong selector1)
3301 uint32_t e1, e2, eflags, selector;
3302 int rpl, dpl, cpl;
3304 selector = selector1 & 0xffff;
3305 eflags = cc_table[CC_OP].compute_all();
3306 if ((selector & 0xfffc) == 0)
3307 goto fail;
3308 if (load_segment(&e1, &e2, selector) != 0)
3309 goto fail;
3310 if (!(e2 & DESC_S_MASK))
3311 goto fail;
3312 rpl = selector & 3;
3313 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3314 cpl = env->hflags & HF_CPL_MASK;
3315 if (e2 & DESC_CS_MASK) {
3316 goto fail;
3317 } else {
3318 if (dpl < cpl || dpl < rpl)
3319 goto fail;
3320 if (!(e2 & DESC_W_MASK)) {
3321 fail:
3322 CC_SRC = eflags & ~CC_Z;
3323 return;
3326 CC_SRC = eflags | CC_Z;
3329 /* x87 FPU helpers */
3331 static void fpu_set_exception(int mask)
3333 env->fpus |= mask;
3334 if (env->fpus & (~env->fpuc & FPUC_EM))
3335 env->fpus |= FPUS_SE | FPUS_B;
3338 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3340 if (b == 0.0)
3341 fpu_set_exception(FPUS_ZE);
3342 return a / b;
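/* Annotation: x87 divide-by-zero is not trapped at the C level; the
 * helper records FPUS_ZE in the status word and lets the host produce
 * the IEEE infinity. Delivery of the fault is deferred until the
 * status is inspected, e.g. by helper_fwait() calling
 * fpu_raise_exception() below. */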
3345 void fpu_raise_exception(void)
3347 if (env->cr[0] & CR0_NE_MASK) {
3348 raise_exception(EXCP10_COPR);
3350 #if !defined(CONFIG_USER_ONLY)
3351 else {
3352 cpu_set_ferr(env);
3354 #endif
3357 void helper_flds_FT0(uint32_t val)
3359 union {
3360 float32 f;
3361 uint32_t i;
3362 } u;
3363 u.i = val;
3364 FT0 = float32_to_floatx(u.f, &env->fp_status);
3367 void helper_fldl_FT0(uint64_t val)
3369 union {
3370 float64 f;
3371 uint64_t i;
3372 } u;
3373 u.i = val;
3374 FT0 = float64_to_floatx(u.f, &env->fp_status);
3377 void helper_fildl_FT0(int32_t val)
3379 FT0 = int32_to_floatx(val, &env->fp_status);
3382 void helper_flds_ST0(uint32_t val)
3384 int new_fpstt;
3385 union {
3386 float32 f;
3387 uint32_t i;
3388 } u;
3389 new_fpstt = (env->fpstt - 1) & 7;
3390 u.i = val;
3391 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3392 env->fpstt = new_fpstt;
3393 env->fptags[new_fpstt] = 0; /* validate stack entry */
3396 void helper_fldl_ST0(uint64_t val)
3398 int new_fpstt;
3399 union {
3400 float64 f;
3401 uint64_t i;
3402 } u;
3403 new_fpstt = (env->fpstt - 1) & 7;
3404 u.i = val;
3405 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3406 env->fpstt = new_fpstt;
3407 env->fptags[new_fpstt] = 0; /* validate stack entry */
3410 void helper_fildl_ST0(int32_t val)
3412 int new_fpstt;
3413 new_fpstt = (env->fpstt - 1) & 7;
3414 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3415 env->fpstt = new_fpstt;
3416 env->fptags[new_fpstt] = 0; /* validate stack entry */
3419 void helper_fildll_ST0(int64_t val)
3421 int new_fpstt;
3422 new_fpstt = (env->fpstt - 1) & 7;
3423 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3424 env->fpstt = new_fpstt;
3425 env->fptags[new_fpstt] = 0; /* validate stack entry */
3428 uint32_t helper_fsts_ST0(void)
3430 union {
3431 float32 f;
3432 uint32_t i;
3433 } u;
3434 u.f = floatx_to_float32(ST0, &env->fp_status);
3435 return u.i;
3438 uint64_t helper_fstl_ST0(void)
3440 union {
3441 float64 f;
3442 uint64_t i;
3443 } u;
3444 u.f = floatx_to_float64(ST0, &env->fp_status);
3445 return u.i;
3448 int32_t helper_fist_ST0(void)
3450 int32_t val;
3451 val = floatx_to_int32(ST0, &env->fp_status);
3452 if (val != (int16_t)val)
3453 val = -32768;
3454 return val;
3457 int32_t helper_fistl_ST0(void)
3459 int32_t val;
3460 val = floatx_to_int32(ST0, &env->fp_status);
3461 return val;
3464 int64_t helper_fistll_ST0(void)
3466 int64_t val;
3467 val = floatx_to_int64(ST0, &env->fp_status);
3468 return val;
3471 int32_t helper_fistt_ST0(void)
3473 int32_t val;
3474 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3475 if (val != (int16_t)val)
3476 val = -32768;
3477 return val;
3480 int32_t helper_fisttl_ST0(void)
3482 int32_t val;
3483 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3484 return val;
3487 int64_t helper_fisttll_ST0(void)
3489 int64_t val;
3490 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3491 return val;
3494 void helper_fldt_ST0(target_ulong ptr)
3496 int new_fpstt;
3497 new_fpstt = (env->fpstt - 1) & 7;
3498 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3499 env->fpstt = new_fpstt;
3500 env->fptags[new_fpstt] = 0; /* validate stack entry */
3503 void helper_fstt_ST0(target_ulong ptr)
3505 helper_fstt(ST0, ptr);
3508 void helper_fpush(void)
3510 fpush();
3513 void helper_fpop(void)
3515 fpop();
3518 void helper_fdecstp(void)
3520 env->fpstt = (env->fpstt - 1) & 7;
3521 env->fpus &= (~0x4700);
3524 void helper_fincstp(void)
3526 env->fpstt = (env->fpstt + 1) & 7;
3527 env->fpus &= (~0x4700);
3530 /* FPU move */
3532 void helper_ffree_STN(int st_index)
3534 env->fptags[(env->fpstt + st_index) & 7] = 1;
3537 void helper_fmov_ST0_FT0(void)
3539 ST0 = FT0;
3542 void helper_fmov_FT0_STN(int st_index)
3544 FT0 = ST(st_index);
3547 void helper_fmov_ST0_STN(int st_index)
3549 ST0 = ST(st_index);
3552 void helper_fmov_STN_ST0(int st_index)
3554 ST(st_index) = ST0;
3557 void helper_fxchg_ST0_STN(int st_index)
3559 CPU86_LDouble tmp;
3560 tmp = ST(st_index);
3561 ST(st_index) = ST0;
3562 ST0 = tmp;
3565 /* FPU operations */
3567 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3569 void helper_fcom_ST0_FT0(void)
3571 int ret;
3573 ret = floatx_compare(ST0, FT0, &env->fp_status);
3574 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3575 FORCE_RET();
3578 void helper_fucom_ST0_FT0(void)
3580 int ret;
3582 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3583 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3584 FORCE_RET();
3587 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3589 void helper_fcomi_ST0_FT0(void)
3591 int eflags;
3592 int ret;
3594 ret = floatx_compare(ST0, FT0, &env->fp_status);
3595 eflags = cc_table[CC_OP].compute_all();
3596 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3597 CC_SRC = eflags;
3598 FORCE_RET();
3601 void helper_fucomi_ST0_FT0(void)
3603 int eflags;
3604 int ret;
3606 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3607 eflags = cc_table[CC_OP].compute_all();
3608 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3609 CC_SRC = eflags;
3610 FORCE_RET();
3613 void helper_fadd_ST0_FT0(void)
3615 ST0 += FT0;
3618 void helper_fmul_ST0_FT0(void)
3620 ST0 *= FT0;
3623 void helper_fsub_ST0_FT0(void)
3625 ST0 -= FT0;
3628 void helper_fsubr_ST0_FT0(void)
3630 ST0 = FT0 - ST0;
3633 void helper_fdiv_ST0_FT0(void)
3635 ST0 = helper_fdiv(ST0, FT0);
3638 void helper_fdivr_ST0_FT0(void)
3640 ST0 = helper_fdiv(FT0, ST0);
3643 /* fp operations between STN and ST0 */
3645 void helper_fadd_STN_ST0(int st_index)
3647 ST(st_index) += ST0;
3650 void helper_fmul_STN_ST0(int st_index)
3652 ST(st_index) *= ST0;
3655 void helper_fsub_STN_ST0(int st_index)
3657 ST(st_index) -= ST0;
3660 void helper_fsubr_STN_ST0(int st_index)
3662 CPU86_LDouble *p;
3663 p = &ST(st_index);
3664 *p = ST0 - *p;
3667 void helper_fdiv_STN_ST0(int st_index)
3669 CPU86_LDouble *p;
3670 p = &ST(st_index);
3671 *p = helper_fdiv(*p, ST0);
3674 void helper_fdivr_STN_ST0(int st_index)
3676 CPU86_LDouble *p;
3677 p = &ST(st_index);
3678 *p = helper_fdiv(ST0, *p);
3681 /* misc FPU operations */
3682 void helper_fchs_ST0(void)
3684 ST0 = floatx_chs(ST0);
3687 void helper_fabs_ST0(void)
3689 ST0 = floatx_abs(ST0);
3692 void helper_fld1_ST0(void)
3694 ST0 = f15rk[1];
3697 void helper_fldl2t_ST0(void)
3699 ST0 = f15rk[6];
3702 void helper_fldl2e_ST0(void)
3704 ST0 = f15rk[5];
3707 void helper_fldpi_ST0(void)
3709 ST0 = f15rk[2];
3712 void helper_fldlg2_ST0(void)
3714 ST0 = f15rk[3];
3717 void helper_fldln2_ST0(void)
3719 ST0 = f15rk[4];
3722 void helper_fldz_ST0(void)
3724 ST0 = f15rk[0];
3727 void helper_fldz_FT0(void)
3729 FT0 = f15rk[0];
3732 uint32_t helper_fnstsw(void)
3734 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3737 uint32_t helper_fnstcw(void)
3739 return env->fpuc;
3742 static void update_fp_status(void)
3744 int rnd_type;
3746 /* set rounding mode */
3747 switch(env->fpuc & RC_MASK) {
3748 default:
3749 case RC_NEAR:
3750 rnd_type = float_round_nearest_even;
3751 break;
3752 case RC_DOWN:
3753 rnd_type = float_round_down;
3754 break;
3755 case RC_UP:
3756 rnd_type = float_round_up;
3757 break;
3758 case RC_CHOP:
3759 rnd_type = float_round_to_zero;
3760 break;
3762 set_float_rounding_mode(rnd_type, &env->fp_status);
3763 #ifdef FLOATX80
3764 switch((env->fpuc >> 8) & 3) {
3765 case 0:
3766 rnd_type = 32;
3767 break;
3768 case 2:
3769 rnd_type = 64;
3770 break;
3771 case 3:
3772 default:
3773 rnd_type = 80;
3774 break;
3776 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3777 #endif
3780 void helper_fldcw(uint32_t val)
3782 env->fpuc = val;
3783 update_fp_status();
3786 void helper_fclex(void)
3788 env->fpus &= 0x7f00;
3791 void helper_fwait(void)
3793 if (env->fpus & FPUS_SE)
3794 fpu_raise_exception();
3795 FORCE_RET();
3798 void helper_fninit(void)
3800 env->fpus = 0;
3801 env->fpstt = 0;
3802 env->fpuc = 0x37f;
3803 env->fptags[0] = 1;
3804 env->fptags[1] = 1;
3805 env->fptags[2] = 1;
3806 env->fptags[3] = 1;
3807 env->fptags[4] = 1;
3808 env->fptags[5] = 1;
3809 env->fptags[6] = 1;
3810 env->fptags[7] = 1;
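/* Annotation: fptags[] stores a simplified 1-bit tag per register
 * (1 = empty, 0 = valid); the architectural 2-bit tag values
 * (valid/zero/special/empty) are only rebuilt on demand in
 * helper_fstenv() and helper_fxsave() below. */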
3813 /* BCD ops */
3815 void helper_fbld_ST0(target_ulong ptr)
3817 CPU86_LDouble tmp;
3818 uint64_t val;
3819 unsigned int v;
3820 int i;
3822 val = 0;
3823 for(i = 8; i >= 0; i--) {
3824 v = ldub(ptr + i);
3825 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3827 tmp = val;
3828 if (ldub(ptr + 9) & 0x80)
3829 tmp = -tmp;
3830 fpush();
3831 ST0 = tmp;
3834 void helper_fbst_ST0(target_ulong ptr)
3836 int v;
3837 target_ulong mem_ref, mem_end;
3838 int64_t val;
3840 val = floatx_to_int64(ST0, &env->fp_status);
3841 mem_ref = ptr;
3842 mem_end = mem_ref + 9;
3843 if (val < 0) {
3844 stb(mem_end, 0x80);
3845 val = -val;
3846 } else {
3847 stb(mem_end, 0x00);
3849 while (mem_ref < mem_end) {
3850 if (val == 0)
3851 break;
3852 v = val % 100;
3853 val = val / 100;
3854 v = ((v / 10) << 4) | (v % 10);
3855 stb(mem_ref++, v);
3857 while (mem_ref < mem_end) {
3858 stb(mem_ref++, 0);
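/* Annotation (worked example): packed BCD holds two decimal digits per
 * byte, least significant byte first, with the sign in byte 9. The
 * value -1234 is stored by the helper above as the hex bytes
 * 34 12 00 00 00 00 00 00 00 80. */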
3862 void helper_f2xm1(void)
3864 ST0 = pow(2.0, ST0) - 1.0;
3867 void helper_fyl2x(void)
3869 CPU86_LDouble fptemp;
3871 fptemp = ST0;
3872 if (fptemp > 0.0) {
3873 fptemp = log(fptemp) / log(2.0); /* log2(ST) */
3874 ST1 *= fptemp;
3875 fpop();
3876 } else {
3877 env->fpus &= (~0x4700);
3878 env->fpus |= 0x400;
3882 void helper_fptan(void)
3884 CPU86_LDouble fptemp;
3886 fptemp = ST0;
3887 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
3888 env->fpus |= 0x400;
3889 } else {
3890 ST0 = tan(fptemp);
3891 fpush();
3892 ST0 = 1.0;
3893 env->fpus &= (~0x400); /* C2 <-- 0 */
3894 /* the above code is for |arg| < 2**52 only */
3898 void helper_fpatan(void)
3900 CPU86_LDouble fptemp, fpsrcop;
3902 fpsrcop = ST1;
3903 fptemp = ST0;
3904 ST1 = atan2(fpsrcop, fptemp);
3905 fpop();
3908 void helper_fxtract(void)
3910 CPU86_LDoubleU temp;
3911 unsigned int expdif;
3913 temp.d = ST0;
3914 expdif = EXPD(temp) - EXPBIAS;
3915 /* DP exponent bias */
3916 ST0 = expdif;
3917 fpush();
3918 BIASEXPONENT(temp);
3919 ST0 = temp.d;
3922 void helper_fprem1(void)
3924 CPU86_LDouble dblq, fpsrcop, fptemp;
3925 CPU86_LDoubleU fpsrcop1, fptemp1;
3926 int expdif;
3927 signed long long int q;
3929 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3930 ST0 = 0.0 / 0.0; /* NaN */
3931 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3932 return;
3935 fpsrcop = ST0;
3936 fptemp = ST1;
3937 fpsrcop1.d = fpsrcop;
3938 fptemp1.d = fptemp;
3939 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3941 if (expdif < 0) {
3942 /* optimisation? taken from the AMD docs */
3943 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3944 /* ST0 is unchanged */
3945 return;
3948 if (expdif < 53) {
3949 dblq = fpsrcop / fptemp;
3950 /* round dblq towards nearest integer */
3951 dblq = rint(dblq);
3952 ST0 = fpsrcop - fptemp * dblq;
3954 /* convert dblq to q by truncating towards zero */
3955 if (dblq < 0.0)
3956 q = (signed long long int)(-dblq);
3957 else
3958 q = (signed long long int)dblq;
3960 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3961 /* (C0,C3,C1) <-- (q2,q1,q0) */
3962 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3963 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3964 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3965 } else {
3966 env->fpus |= 0x400; /* C2 <-- 1 */
3967 fptemp = pow(2.0, expdif - 50);
3968 fpsrcop = (ST0 / ST1) / fptemp;
3969 /* fpsrcop = integer obtained by chopping */
3970 fpsrcop = (fpsrcop < 0.0) ?
3971 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3972 ST0 -= (ST1 * fpsrcop * fptemp);
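/* Annotation: when the exponent gap is 53 or more, the code above only
 * performs a partial reduction and sets C2, telling software to issue
 * FPREM1 again; completion is signalled by C2 clear with the low
 * quotient bits reported in C0/C3/C1. The same protocol applies to
 * FPREM below. */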
3976 void helper_fprem(void)
3978 CPU86_LDouble dblq, fpsrcop, fptemp;
3979 CPU86_LDoubleU fpsrcop1, fptemp1;
3980 int expdif;
3981 signed long long int q;
3983 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3984 ST0 = 0.0 / 0.0; /* NaN */
3985 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3986 return;
3989 fpsrcop = (CPU86_LDouble)ST0;
3990 fptemp = (CPU86_LDouble)ST1;
3991 fpsrcop1.d = fpsrcop;
3992 fptemp1.d = fptemp;
3993 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3995 if (expdif < 0) {
3996 /* optimisation? taken from the AMD docs */
3997 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3998 /* ST0 is unchanged */
3999 return;
4002 if (expdif < 53) {
4003 dblq = fpsrcop / fptemp; /* ST0 / ST1 */
4004 /* round dblq towards zero */
4005 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4006 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4008 /* convert dblq to q by truncating towards zero */
4009 if (dblq < 0.0)
4010 q = (signed long long int)(-dblq);
4011 else
4012 q = (signed long long int)dblq;
4014 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4015 /* (C0,C3,C1) <-- (q2,q1,q0) */
4016 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4017 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4018 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4019 } else {
4020 int N = 32 + (expdif % 32); /* as per AMD docs */
4021 env->fpus |= 0x400; /* C2 <-- 1 */
4022 fptemp = pow(2.0, (double)(expdif - N));
4023 fpsrcop = (ST0 / ST1) / fptemp;
4024 /* fpsrcop = integer obtained by chopping */
4025 fpsrcop = (fpsrcop < 0.0) ?
4026 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4027 ST0 -= (ST1 * fpsrcop * fptemp);
4031 void helper_fyl2xp1(void)
4033 CPU86_LDouble fptemp;
4035 fptemp = ST0;
4036 if ((fptemp + 1.0) > 0.0) {
4037 fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
4038 ST1 *= fptemp;
4039 fpop();
4040 } else {
4041 env->fpus &= (~0x4700);
4042 env->fpus |= 0x400;
4046 void helper_fsqrt(void)
4048 CPU86_LDouble fptemp;
4050 fptemp = ST0;
4051 if (fptemp < 0.0) {
4052 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4053 env->fpus |= 0x400;
4055 ST0 = sqrt(fptemp);
4058 void helper_fsincos(void)
4060 CPU86_LDouble fptemp;
4062 fptemp = ST0;
4063 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4064 env->fpus |= 0x400;
4065 } else {
4066 ST0 = sin(fptemp);
4067 fpush();
4068 ST0 = cos(fptemp);
4069 env->fpus &= (~0x400); /* C2 <-- 0 */
4070 /* the above code is for |arg| < 2**63 only */
4074 void helper_frndint(void)
4076 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4079 void helper_fscale(void)
4081 ST0 = ldexp (ST0, (int)(ST1));
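/* Annotation: FSCALE scales by 2 to the *truncated* integer part of
 * ST(1); the (int) cast above implements the chop, so scaling 1.0 by
 * 2.9 yields 4.0 (2^2), not 2^2.9. */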
4084 void helper_fsin(void)
4086 CPU86_LDouble fptemp;
4088 fptemp = ST0;
4089 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4090 env->fpus |= 0x400;
4091 } else {
4092 ST0 = sin(fptemp);
4093 env->fpus &= (~0x400); /* C2 <-- 0 */
4094 /* the above code is for |arg| < 2**53 only */
4098 void helper_fcos(void)
4100 CPU86_LDouble fptemp;
4102 fptemp = ST0;
4103 if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
4104 env->fpus |= 0x400;
4105 } else {
4106 ST0 = cos(fptemp);
4107 env->fpus &= (~0x400); /* C2 <-- 0 */
4108 /* the above code is for |arg| < 2**63 only */
4112 void helper_fxam_ST0(void)
4114 CPU86_LDoubleU temp;
4115 int expdif;
4117 temp.d = ST0;
4119 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4120 if (SIGND(temp))
4121 env->fpus |= 0x200; /* C1 <-- 1 */
4123 /* XXX: test fptags too */
4124 expdif = EXPD(temp);
4125 if (expdif == MAXEXPD) {
4126 #ifdef USE_X86LDOUBLE
4127 if (MANTD(temp) == 0x8000000000000000ULL)
4128 #else
4129 if (MANTD(temp) == 0)
4130 #endif
4131 env->fpus |= 0x500 /*Infinity*/;
4132 else
4133 env->fpus |= 0x100 /*NaN*/;
4134 } else if (expdif == 0) {
4135 if (MANTD(temp) == 0)
4136 env->fpus |= 0x4000 /*Zero*/;
4137 else
4138 env->fpus |= 0x4400 /*Denormal*/;
4139 } else {
4140 env->fpus |= 0x400;
4144 void helper_fstenv(target_ulong ptr, int data32)
4146 int fpus, fptag, exp, i;
4147 uint64_t mant;
4148 CPU86_LDoubleU tmp;
4150 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4151 fptag = 0;
4152 for (i=7; i>=0; i--) {
4153 fptag <<= 2;
4154 if (env->fptags[i]) {
4155 fptag |= 3;
4156 } else {
4157 tmp.d = env->fpregs[i].d;
4158 exp = EXPD(tmp);
4159 mant = MANTD(tmp);
4160 if (exp == 0 && mant == 0) {
4161 /* zero */
4162 fptag |= 1;
4163 } else if (exp == 0 || exp == MAXEXPD
4164 #ifdef USE_X86LDOUBLE
4165 || (mant & (1LL << 63)) == 0
4166 #endif
4168 /* NaNs, infinity, denormal */
4169 fptag |= 2;
4173 if (data32) {
4174 /* 32 bit */
4175 stl(ptr, env->fpuc);
4176 stl(ptr + 4, fpus);
4177 stl(ptr + 8, fptag);
4178 stl(ptr + 12, 0); /* fpip */
4179 stl(ptr + 16, 0); /* fpcs */
4180 stl(ptr + 20, 0); /* fpoo */
4181 stl(ptr + 24, 0); /* fpos */
4182 } else {
4183 /* 16 bit */
4184 stw(ptr, env->fpuc);
4185 stw(ptr + 2, fpus);
4186 stw(ptr + 4, fptag);
4187 stw(ptr + 6, 0);
4188 stw(ptr + 8, 0);
4189 stw(ptr + 10, 0);
4190 stw(ptr + 12, 0);
4194 void helper_fldenv(target_ulong ptr, int data32)
4196 int i, fpus, fptag;
4198 if (data32) {
4199 env->fpuc = lduw(ptr);
4200 fpus = lduw(ptr + 4);
4201 fptag = lduw(ptr + 8);
4203 else {
4204 env->fpuc = lduw(ptr);
4205 fpus = lduw(ptr + 2);
4206 fptag = lduw(ptr + 4);
4208 env->fpstt = (fpus >> 11) & 7;
4209 env->fpus = fpus & ~0x3800;
4210 for(i = 0;i < 8; i++) {
4211 env->fptags[i] = ((fptag & 3) == 3);
4212 fptag >>= 2;
4216 void helper_fsave(target_ulong ptr, int data32)
4218 CPU86_LDouble tmp;
4219 int i;
4221 helper_fstenv(ptr, data32);
4223 ptr += (14 << data32);
4224 for(i = 0;i < 8; i++) {
4225 tmp = ST(i);
4226 helper_fstt(tmp, ptr);
4227 ptr += 10;
4230 /* fninit */
4231 env->fpus = 0;
4232 env->fpstt = 0;
4233 env->fpuc = 0x37f;
4234 env->fptags[0] = 1;
4235 env->fptags[1] = 1;
4236 env->fptags[2] = 1;
4237 env->fptags[3] = 1;
4238 env->fptags[4] = 1;
4239 env->fptags[5] = 1;
4240 env->fptags[6] = 1;
4241 env->fptags[7] = 1;
4244 void helper_frstor(target_ulong ptr, int data32)
4246 CPU86_LDouble tmp;
4247 int i;
4249 helper_fldenv(ptr, data32);
4250 ptr += (14 << data32);
4252 for(i = 0;i < 8; i++) {
4253 tmp = helper_fldt(ptr);
4254 ST(i) = tmp;
4255 ptr += 10;
4259 void helper_fxsave(target_ulong ptr, int data64)
4261 int fpus, fptag, i, nb_xmm_regs;
4262 CPU86_LDouble tmp;
4263 target_ulong addr;
4265 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4266 fptag = 0;
4267 for(i = 0; i < 8; i++) {
4268 fptag |= (env->fptags[i] << i);
4270 stw(ptr, env->fpuc);
4271 stw(ptr + 2, fpus);
4272 stw(ptr + 4, fptag ^ 0xff);
4273 #ifdef TARGET_X86_64
4274 if (data64) {
4275 stq(ptr + 0x08, 0); /* rip */
4276 stq(ptr + 0x10, 0); /* rdp */
4277 } else
4278 #endif
4280 stl(ptr + 0x08, 0); /* eip */
4281 stl(ptr + 0x0c, 0); /* sel */
4282 stl(ptr + 0x10, 0); /* dp */
4283 stl(ptr + 0x14, 0); /* sel */
4286 addr = ptr + 0x20;
4287 for(i = 0;i < 8; i++) {
4288 tmp = ST(i);
4289 helper_fstt(tmp, addr);
4290 addr += 16;
4293 if (env->cr[4] & CR4_OSFXSR_MASK) {
4294 /* XXX: finish it */
4295 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4296 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4297 if (env->hflags & HF_CS64_MASK)
4298 nb_xmm_regs = 16;
4299 else
4300 nb_xmm_regs = 8;
4301 addr = ptr + 0xa0;
4302 for(i = 0; i < nb_xmm_regs; i++) {
4303 stq(addr, env->xmm_regs[i].XMM_Q(0));
4304 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4305 addr += 16;
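/* Annotation: the FXSAVE image is 512 bytes: the control/status header
 * starts at offset 0, eight 16-byte x87 register slots follow from
 * 0x20 (10 bytes of data plus 6 of padding each), and the XMM
 * registers start at 0xa0, matching the offsets used above. */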
4310 void helper_fxrstor(target_ulong ptr, int data64)
4312 int i, fpus, fptag, nb_xmm_regs;
4313 CPU86_LDouble tmp;
4314 target_ulong addr;
4316 env->fpuc = lduw(ptr);
4317 fpus = lduw(ptr + 2);
4318 fptag = lduw(ptr + 4);
4319 env->fpstt = (fpus >> 11) & 7;
4320 env->fpus = fpus & ~0x3800;
4321 fptag ^= 0xff;
4322 for(i = 0;i < 8; i++) {
4323 env->fptags[i] = ((fptag >> i) & 1);
4326 addr = ptr + 0x20;
4327 for(i = 0;i < 8; i++) {
4328 tmp = helper_fldt(addr);
4329 ST(i) = tmp;
4330 addr += 16;
4333 if (env->cr[4] & CR4_OSFXSR_MASK) {
4334 /* XXX: finish it */
4335 env->mxcsr = ldl(ptr + 0x18);
4336 //ldl(ptr + 0x1c);
4337 if (env->hflags & HF_CS64_MASK)
4338 nb_xmm_regs = 16;
4339 else
4340 nb_xmm_regs = 8;
4341 addr = ptr + 0xa0;
4342 for(i = 0; i < nb_xmm_regs; i++) {
4343 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4344 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4345 addr += 16;
4350 #ifndef USE_X86LDOUBLE
4352 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4354 CPU86_LDoubleU temp;
4355 int e;
4357 temp.d = f;
4358 /* mantissa */
4359 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4360 /* exponent + sign */
4361 e = EXPD(temp) - EXPBIAS + 16383;
4362 e |= SIGND(temp) >> 16;
4363 *pexp = e;
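/* Annotation: the 80-bit extended format keeps an explicit integer bit
 * at mantissa bit 63, which is why cpu_get_fp80() ORs in 1LL << 63
 * when synthesizing an extended value from an IEEE double's implicit
 * leading 1. */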
4366 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4368 CPU86_LDoubleU temp;
4369 int e;
4370 uint64_t ll;
4372 /* XXX: handle overflow ? */
4373 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4374 e |= (upper >> 4) & 0x800; /* sign */
4375 ll = (mant >> 11) & ((1LL << 52) - 1);
4376 #ifdef __arm__
4377 temp.l.upper = (e << 20) | (ll >> 32);
4378 temp.l.lower = ll;
4379 #else
4380 temp.ll = ll | ((uint64_t)e << 52);
4381 #endif
4382 return temp.d;
4385 #else
4387 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4389 CPU86_LDoubleU temp;
4391 temp.d = f;
4392 *pmant = temp.l.lower;
4393 *pexp = temp.l.upper;
4396 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4398 CPU86_LDoubleU temp;
4400 temp.l.upper = upper;
4401 temp.l.lower = mant;
4402 return temp.d;
4404 #endif
4406 #ifdef TARGET_X86_64
4408 //#define DEBUG_MULDIV
4410 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4412 *plow += a;
4413 /* carry test */
4414 if (*plow < a)
4415 (*phigh)++;
4416 *phigh += b;
4419 static void neg128(uint64_t *plow, uint64_t *phigh)
4421 *plow = ~ *plow;
4422 *phigh = ~ *phigh;
4423 add128(plow, phigh, 1, 0);
4426 /* return TRUE if overflow */
4427 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4429 uint64_t q, r, a1, a0;
4430 int i, qb, ab;
4432 a0 = *plow;
4433 a1 = *phigh;
4434 if (a1 == 0) {
4435 q = a0 / b;
4436 r = a0 % b;
4437 *plow = q;
4438 *phigh = r;
4439 } else {
4440 if (a1 >= b)
4441 return 1;
4442 /* XXX: use a better algorithm */
4443 for(i = 0; i < 64; i++) {
4444 ab = a1 >> 63;
4445 a1 = (a1 << 1) | (a0 >> 63);
4446 if (ab || a1 >= b) {
4447 a1 -= b;
4448 qb = 1;
4449 } else {
4450 qb = 0;
4452 a0 = (a0 << 1) | qb;
4454 #if defined(DEBUG_MULDIV)
4455 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4456 *phigh, *plow, b, a0, a1);
4457 #endif
4458 *plow = a0;
4459 *phigh = a1;
4461 return 0;
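/* Annotation (illustrative usage): dividing the 128-bit value 2^64
 * (high = 1, low = 0) by 3 with div64() above:
 */
#if 0
uint64_t lo = 0, hi = 1;

if (!div64(&lo, &hi, 3)) {
    /* lo == 0x5555555555555555 (quotient), hi == 1 (remainder) */
}
#endif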

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = -*phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}
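
/* For the signed multiplies, CC_SRC holds a boolean: non-zero iff the high
   half is not just the sign extension of the low half, i.e. the full
   128-bit product does not fit in 64 bits.  Illustrative example: 2 * 3
   gives r0 = 6, r1 = 0 and CC_SRC = 0 (no overflow), while
   (1ULL << 62) * 4 wraps r0 to 0 with r1 = 1, so CC_SRC = 1 and the
   CC_OP_MULQ table entry later reports CF/OF as set. */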

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

#endif
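
/* Illustrative example of the overflow path: DIV with RDX = 1, RAX = 0
   (a dividend of 2^64) and a divisor of 1 would need a 65-bit quotient,
   so div64() returns 1 (a1 >= b) and EXCP00_DIVZ is raised, matching the
   #DE that real hardware delivers on quotient overflow. */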

void helper_hlt(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);

    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;

    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;

    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}
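
/* Illustrative example: for BOUND with the bounds pair {low = 10,
   high = 20} stored at a0, an index of 15 lies inside the inclusive
   range and returns normally, while 21 (or -1) raises #BR via
   EXCP05_BOUND. */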

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
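
/* Note: hardware RCPSS/RSQRTSS only guarantee a relative error of roughly
   2^-12; returning the exactly computed result here is a simplification
   that stays comfortably within that architectural tolerance. */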

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c). */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}

#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
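
/* The VMCB "attrib" field is a packed form of the descriptor attribute
   bits: attrib bits 0-7 hold descriptor flag bits 8-15 (type, S, DPL, P)
   and attrib bits 8-11 hold flag bits 20-23 (AVL, L, D/B, G).  The two
   helpers above are exact inverses under this packing; e.g. a flat
   32-bit code segment with flags 0x00c09a00 packs to attrib 0x0c9a and
   unpacks back unchanged (illustrative values). */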

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            cpu_loop_exit();
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;

    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
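
/* Worked example (illustrative only): the MSR permission map holds two
   bits per MSR -- read intercept first, then write intercept -- and
   param is 0 for rdmsr, 1 for wrmsr.  For rdmsr of MSR 0x10, the first
   range applies: bit offset 0x10 * 2 = 32, so t1 = 4, t0 = 0, and the
   read intercept is bit 0 of the byte at msrpm_base_pa + 4; the
   corresponding wrmsr tests bit 1 of the same byte. */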

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
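
/* Worked example (illustrative only): the I/O permission map holds one
   bit per port, and the size field in param bits 4-6 is one-hot (1/2/4
   for 8/16/32-bit accesses), so mask comes out as 1, 3 or 15, covering
   1, 2 or 4 ports.  For a 16-bit access to port 0x71, mask = 3 and the
   check tests bits 1-2 of the 16-bit word at iopm_base_pa + 14, i.e.
   the permission bits of ports 0x71 and 0x72. */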

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
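
/* Each fptags[] byte is 0 (valid) or 1 (empty), so the two 32-bit stores
   above set all eight tags at once: helper_enter_mmx() marks every
   register valid and resets the stack top, as MMX instructions require,
   while helper_emms() writes 0x01010101 twice to mark all registers
   empty, implementing EMMS. */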

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
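
/* Note: both loops rely on t0 being non-zero (a zero operand would never
   terminate); the translator branches around these helpers when the
   source is 0, since BSF/BSR then leave the destination undefined and
   only set ZF.  Example: helper_bsf(0x48) returns 3 and helper_bsr(0x48)
   returns 6. */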

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif