Display TCGCond name in tcg dumper (original patch by Tristan Gingold)
[qemu/mini2440.git] / target-i386 / op_helper.c
blob 23f30809052c904dbbd1221e2ff60e3cff1827e0
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #define CPU_NO_GLOBAL_REGS
21 #include "exec.h"
22 #include "host-utils.h"
24 //#define DEBUG_PCALL
26 #if 0
27 #define raise_exception_err(a, b)\
28 do {\
29 if (logfile)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
32 } while (0)
33 #endif
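/* Precomputed PF lookup: parity_table[v & 0xff] is CC_P when the low byte
   of a result contains an even number of set bits, and 0 otherwise.  For
   example 0xff has eight 1 bits, so parity_table[0xff] == CC_P, while
   parity_table[0x01] == 0. */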
35 const uint8_t parity_table[256] = {
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
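/* The next two tables fold the shift count for RCL/RCR.  Rotating an
   N-bit operand through the carry flag has period N + 1, so after the
   count has been masked to 5 bits it is reduced modulo 17 (16-bit
   operands) or modulo 9 (8-bit operands) by a table lookup. */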
70 /* modulo 17 table */
71 const uint8_t rclw_table[32] = {
72 0, 1, 2, 3, 4, 5, 6, 7,
73 8, 9,10,11,12,13,14,15,
74 16, 0, 1, 2, 3, 4, 5, 6,
75 7, 8, 9,10,11,12,13,14,
78 /* modulo 9 table */
79 const uint8_t rclb_table[32] = {
80 0, 1, 2, 3, 4, 5, 6, 7,
81 8, 0, 1, 2, 3, 4, 5, 6,
82 7, 8, 0, 1, 2, 3, 4, 5,
83 6, 7, 8, 0, 1, 2, 3, 4,
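/* x87 constants used by the FLD-constant helpers (fldz, fld1, fldpi,
   fldlg2, fldln2, fldl2e, fldl2t): 0.0, 1.0, pi, log10(2), ln(2),
   log2(e) and log2(10). */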
86 const CPU86_LDouble f15rk[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
97 /* broken thread support */
99 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock);
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock);
111 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
113 load_eflags(t0, update_mask);
116 target_ulong helper_read_eflags(void)
118 uint32_t eflags;
119 eflags = cc_table[CC_OP].compute_all();
120 eflags |= (DF & DF_MASK);
121 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
122 return eflags;
125 /* return non-zero on error */
126 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
127 int selector)
129 SegmentCache *dt;
130 int index;
131 target_ulong ptr;
133 if (selector & 0x4)
134 dt = &env->ldt;
135 else
136 dt = &env->gdt;
137 index = selector & ~7;
138 if ((index + 7) > dt->limit)
139 return -1;
140 ptr = dt->base + index;
141 *e1_ptr = ldl_kernel(ptr);
142 *e2_ptr = ldl_kernel(ptr + 4);
143 return 0;
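/* A descriptor is read as two 32-bit words: e1 (bytes 0-3) and e2 (bytes
   4-7).  The limit is e1[15:0] | e2[19:16], scaled by 4K when the G bit
   is set; the base is e1[31:16] | e2[7:0] << 16 | e2[31:24] << 24.  As an
   illustration, a flat descriptor with e1 = 0x0000ffff, e2 = 0x00cf9a00
   decodes to base 0 and limit 0xffffffff. */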
146 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
148 unsigned int limit;
149 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
150 if (e2 & DESC_G_MASK)
151 limit = (limit << 12) | 0xfff;
152 return limit;
155 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
157 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
160 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
162 sc->base = get_seg_base(e1, e2);
163 sc->limit = get_seg_limit(e1, e2);
164 sc->flags = e2;
167 /* init the segment cache in vm86 mode. */
168 static inline void load_seg_vm(int seg, int selector)
170 selector &= 0xffff;
171 cpu_x86_load_seg_cache(env, seg, selector,
172 (selector << 4), 0xffff, 0);
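/* Fetch the privilege-level `dpl' stack pointer from the current TSS.
   For a 32-bit TSS (shift == 1), ESPn lives at offset dpl * 8 + 4 with
   SSn at dpl * 8 + 8; for a 16-bit TSS (shift == 0), SPn is at
   dpl * 4 + 2 with SSn at dpl * 4 + 4. */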
175 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
176 uint32_t *esp_ptr, int dpl)
178 int type, index, shift;
180 #if 0
182 int i;
183 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
184 for(i=0;i<env->tr.limit;i++) {
185 printf("%02x ", env->tr.base[i]);
186 if ((i & 7) == 7) printf("\n");
188 printf("\n");
190 #endif
192 if (!(env->tr.flags & DESC_P_MASK))
193 cpu_abort(env, "invalid tss");
194 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
195 if ((type & 7) != 1)
196 cpu_abort(env, "invalid tss type");
197 shift = type >> 3;
198 index = (dpl * 4 + 2) << shift;
199 if (index + (4 << shift) - 1 > env->tr.limit)
200 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
201 if (shift == 0) {
202 *esp_ptr = lduw_kernel(env->tr.base + index);
203 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
204 } else {
205 *esp_ptr = ldl_kernel(env->tr.base + index);
206 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
210 /* XXX: merge with load_seg() */
211 static void tss_load_seg(int seg_reg, int selector)
213 uint32_t e1, e2;
214 int rpl, dpl, cpl;
216 if ((selector & 0xfffc) != 0) {
217 if (load_segment(&e1, &e2, selector) != 0)
218 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
219 if (!(e2 & DESC_S_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 rpl = selector & 3;
222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
223 cpl = env->hflags & HF_CPL_MASK;
224 if (seg_reg == R_CS) {
225 if (!(e2 & DESC_CS_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* XXX: is this correct? */
228 if (dpl != rpl)
229 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230 if ((e2 & DESC_C_MASK) && dpl > rpl)
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232 } else if (seg_reg == R_SS) {
233 /* SS must be writable data */
234 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 if (dpl != cpl || dpl != rpl)
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 } else {
239 /* not readable code */
240 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 /* if data or non-conforming code, check the rights */
243 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
244 if (dpl < cpl || dpl < rpl)
245 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 if (!(e2 & DESC_P_MASK))
249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
250 cpu_x86_load_seg_cache(env, seg_reg, selector,
251 get_seg_base(e1, e2),
252 get_seg_limit(e1, e2),
253 e2);
254 } else {
255 if (seg_reg == R_SS || seg_reg == R_CS)
256 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
260 #define SWITCH_TSS_JMP 0
261 #define SWITCH_TSS_IRET 1
262 #define SWITCH_TSS_CALL 2
264 /* XXX: restore CPU state in registers (PowerPC case) */
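/* Hardware task switch.  The caller has already decoded the new TSS
   descriptor (e1/e2); this routine checks its limit, saves the outgoing
   register state into the current TSS, updates the busy bits in the GDT
   (and the NT flag / back link for a CALL), then loads TR, CR3, EIP,
   EFLAGS, the general registers, LDTR and the segment registers from the
   new TSS. */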
265 static void switch_tss(int tss_selector,
266 uint32_t e1, uint32_t e2, int source,
267 uint32_t next_eip)
269 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
270 target_ulong tss_base;
271 uint32_t new_regs[8], new_segs[6];
272 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
273 uint32_t old_eflags, eflags_mask;
274 SegmentCache *dt;
275 int index;
276 target_ulong ptr;
278 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
279 #ifdef DEBUG_PCALL
280 if (loglevel & CPU_LOG_PCALL)
281 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
282 #endif
284 /* if task gate, we read the TSS segment and we load it */
285 if (type == 5) {
286 if (!(e2 & DESC_P_MASK))
287 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
288 tss_selector = e1 >> 16;
289 if (tss_selector & 4)
290 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
291 if (load_segment(&e1, &e2, tss_selector) != 0)
292 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
293 if (e2 & DESC_S_MASK)
294 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
295 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
296 if ((type & 7) != 1)
297 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
300 if (!(e2 & DESC_P_MASK))
301 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
303 if (type & 8)
304 tss_limit_max = 103;
305 else
306 tss_limit_max = 43;
307 tss_limit = get_seg_limit(e1, e2);
308 tss_base = get_seg_base(e1, e2);
309 if ((tss_selector & 4) != 0 ||
310 tss_limit < tss_limit_max)
311 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
312 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if (old_type & 8)
314 old_tss_limit_max = 103;
315 else
316 old_tss_limit_max = 43;
318 /* read all the registers from the new TSS */
319 if (type & 8) {
320 /* 32 bit */
321 new_cr3 = ldl_kernel(tss_base + 0x1c);
322 new_eip = ldl_kernel(tss_base + 0x20);
323 new_eflags = ldl_kernel(tss_base + 0x24);
324 for(i = 0; i < 8; i++)
325 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
326 for(i = 0; i < 6; i++)
327 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
328 new_ldt = lduw_kernel(tss_base + 0x60);
329 new_trap = ldl_kernel(tss_base + 0x64);
330 } else {
331 /* 16 bit */
332 new_cr3 = 0;
333 new_eip = lduw_kernel(tss_base + 0x0e);
334 new_eflags = lduw_kernel(tss_base + 0x10);
335 for(i = 0; i < 8; i++)
336 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
337 for(i = 0; i < 4; i++)
338 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
339 new_ldt = lduw_kernel(tss_base + 0x2a);
340 new_segs[R_FS] = 0;
341 new_segs[R_GS] = 0;
342 new_trap = 0;
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses beforehand */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to validate the TLB after having done the accesses */
350 v1 = ldub_kernel(env->tr.base);
351 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
352 stb_kernel(env->tr.base, v1);
353 stb_kernel(env->tr.base + old_tss_limit_max, v2);
355 /* clear busy bit (it is restartable) */
356 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
357 target_ulong ptr;
358 uint32_t e2;
359 ptr = env->gdt.base + (env->tr.selector & ~7);
360 e2 = ldl_kernel(ptr + 4);
361 e2 &= ~DESC_TSS_BUSY_MASK;
362 stl_kernel(ptr + 4, e2);
364 old_eflags = compute_eflags();
365 if (source == SWITCH_TSS_IRET)
366 old_eflags &= ~NT_MASK;
368 /* save the current state in the old TSS */
369 if (type & 8) {
370 /* 32 bit */
371 stl_kernel(env->tr.base + 0x20, next_eip);
372 stl_kernel(env->tr.base + 0x24, old_eflags);
373 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
374 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
375 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
376 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
377 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
378 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
379 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
380 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
381 for(i = 0; i < 6; i++)
382 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
383 } else {
384 /* 16 bit */
385 stw_kernel(env->tr.base + 0x0e, next_eip);
386 stw_kernel(env->tr.base + 0x10, old_eflags);
387 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
388 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
389 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
390 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
391 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
392 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
393 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
394 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
395 for(i = 0; i < 4; i++)
396 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
399 /* now if an exception occurs, it will occur in the next task
400 context */
402 if (source == SWITCH_TSS_CALL) {
403 stw_kernel(tss_base, env->tr.selector);
404 new_eflags |= NT_MASK;
407 /* set busy bit */
408 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409 target_ulong ptr;
410 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = ldl_kernel(ptr + 4);
413 e2 |= DESC_TSS_BUSY_MASK;
414 stl_kernel(ptr + 4, e2);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* load all registers without an exception, then reload them with
431 possible exception */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8))
436 eflags_mask &= 0xffff;
437 load_eflags(new_eflags, eflags_mask);
438 /* XXX: what to do in 16 bit case ? */
439 EAX = new_regs[0];
440 ECX = new_regs[1];
441 EDX = new_regs[2];
442 EBX = new_regs[3];
443 ESP = new_regs[4];
444 EBP = new_regs[5];
445 ESI = new_regs[6];
446 EDI = new_regs[7];
447 if (new_eflags & VM_MASK) {
448 for(i = 0; i < 6; i++)
449 load_seg_vm(i, new_segs[i]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env, 3);
452 } else {
453 /* CPL is set to the RPL of CS */
454 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
455 /* load just the selectors first, as the rest may trigger exceptions */
456 for(i = 0; i < 6; i++)
457 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
460 env->ldt.selector = new_ldt & ~4;
461 env->ldt.base = 0;
462 env->ldt.limit = 0;
463 env->ldt.flags = 0;
465 /* load the LDT */
466 if (new_ldt & 4)
467 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit)
473 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474 ptr = dt->base + index;
475 e1 = ldl_kernel(ptr);
476 e2 = ldl_kernel(ptr + 4);
477 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
479 if (!(e2 & DESC_P_MASK))
480 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 tss_load_seg(R_CS, new_segs[R_CS]);
487 tss_load_seg(R_SS, new_segs[R_SS]);
488 tss_load_seg(R_ES, new_segs[R_ES]);
489 tss_load_seg(R_DS, new_segs[R_DS]);
490 tss_load_seg(R_FS, new_segs[R_FS]);
491 tss_load_seg(R_GS, new_segs[R_GS]);
494 /* check that EIP is in the CS segment limits */
495 if (new_eip > env->segs[R_CS].limit) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF, 0);
501 /* check if Port I/O is allowed in TSS */
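/* The I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the 32-bit TSS, one bit per port; an access of `size' bytes is
   allowed only if all of its bits are clear.  E.g. for port 0x3f8 the
   relevant bit is bit 0 of bitmap byte 0x3f8 >> 3 == 0x7f.  A 16-bit word
   is read because an access may straddle a byte boundary. */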
502 static inline void check_io(int addr, int size)
504 int io_offset, val, mask;
506 /* TSS must be a valid 32 bit one */
507 if (!(env->tr.flags & DESC_P_MASK) ||
508 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
509 env->tr.limit < 103)
510 goto fail;
511 io_offset = lduw_kernel(env->tr.base + 0x66);
512 io_offset += (addr >> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset + 1) > env->tr.limit)
515 goto fail;
516 val = lduw_kernel(env->tr.base + io_offset);
517 val >>= (addr & 7);
518 mask = (1 << size) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val & mask) != 0) {
521 fail:
522 raise_exception_err(EXCP0D_GPF, 0);
526 void helper_check_iob(uint32_t t0)
528 check_io(t0, 1);
531 void helper_check_iow(uint32_t t0)
533 check_io(t0, 2);
536 void helper_check_iol(uint32_t t0)
538 check_io(t0, 4);
541 void helper_outb(uint32_t port, uint32_t data)
543 cpu_outb(env, port, data & 0xff);
546 target_ulong helper_inb(uint32_t port)
548 return cpu_inb(env, port);
551 void helper_outw(uint32_t port, uint32_t data)
553 cpu_outw(env, port, data & 0xffff);
556 target_ulong helper_inw(uint32_t port)
558 return cpu_inw(env, port);
561 void helper_outl(uint32_t port, uint32_t data)
563 cpu_outl(env, port, data);
566 target_ulong helper_inl(uint32_t port)
568 return cpu_inl(env, port);
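/* The B bit of the SS descriptor selects the stack-pointer width:
   32-bit stacks use ESP (mask 0xffffffff), 16-bit stacks use SP
   (mask 0xffff). */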
571 static inline unsigned int get_sp_mask(unsigned int e2)
573 if (e2 & DESC_B_MASK)
574 return 0xffffffff;
575 else
576 return 0xffff;
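/* SET_ESP writes back only the bits selected by sp_mask, so a 16-bit
   stack segment updates SP without touching the upper half of ESP.  The
   64-bit build special-cases the two common masks and otherwise stores
   the full value. */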
579 #ifdef TARGET_X86_64
580 #define SET_ESP(val, sp_mask)\
581 do {\
582 if ((sp_mask) == 0xffff)\
583 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584 else if ((sp_mask) == 0xffffffffLL)\
585 ESP = (uint32_t)(val);\
586 else\
587 ESP = (val);\
588 } while (0)
589 #else
590 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
591 #endif
593 /* in 64-bit machines, this can overflow. So this segment addition macro
594 * can be used to trim the value to 32-bit whenever needed */
595 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
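/* The PUSH/POP macros below work on a local copy of the stack pointer
   and apply sp_mask on every access so the stack wraps inside the
   segment; the caller commits the final value with SET_ESP.  SEG_ADDL
   truncates the linear address to 32 bits as explained above. */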
597 /* XXX: add a is_user flag to have proper security support */
598 #define PUSHW(ssp, sp, sp_mask, val)\
600 sp -= 2;\
601 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
604 #define PUSHL(ssp, sp, sp_mask, val)\
606 sp -= 4;\
607 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
610 #define POPW(ssp, sp, sp_mask, val)\
612 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
613 sp += 2;\
616 #define POPL(ssp, sp, sp_mask, val)\
618 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
619 sp += 4;\
622 /* protected mode interrupt */
623 static void do_interrupt_protected(int intno, int is_int, int error_code,
624 unsigned int next_eip, int is_hw)
626 SegmentCache *dt;
627 target_ulong ptr, ssp;
628 int type, dpl, selector, ss_dpl, cpl;
629 int has_error_code, new_stack, shift;
630 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
631 uint32_t old_eip, sp_mask;
633 has_error_code = 0;
634 if (!is_int && !is_hw) {
635 switch(intno) {
636 case 8:
637 case 10:
638 case 11:
639 case 12:
640 case 13:
641 case 14:
642 case 17:
643 has_error_code = 1;
644 break;
647 if (is_int)
648 old_eip = next_eip;
649 else
650 old_eip = env->eip;
652 dt = &env->idt;
653 if (intno * 8 + 7 > dt->limit)
654 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
655 ptr = dt->base + intno * 8;
656 e1 = ldl_kernel(ptr);
657 e2 = ldl_kernel(ptr + 4);
658 /* check gate type */
659 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
660 switch(type) {
661 case 5: /* task gate */
662 /* must do that check here to return the correct error code */
663 if (!(e2 & DESC_P_MASK))
664 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
665 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
666 if (has_error_code) {
667 int type;
668 uint32_t mask;
669 /* push the error code */
670 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
671 shift = type >> 3;
672 if (env->segs[R_SS].flags & DESC_B_MASK)
673 mask = 0xffffffff;
674 else
675 mask = 0xffff;
676 esp = (ESP - (2 << shift)) & mask;
677 ssp = env->segs[R_SS].base + esp;
678 if (shift)
679 stl_kernel(ssp, error_code);
680 else
681 stw_kernel(ssp, error_code);
682 SET_ESP(esp, mask);
684 return;
685 case 6: /* 286 interrupt gate */
686 case 7: /* 286 trap gate */
687 case 14: /* 386 interrupt gate */
688 case 15: /* 386 trap gate */
689 break;
690 default:
691 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
692 break;
694 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
695 cpl = env->hflags & HF_CPL_MASK;
696 /* check privilege if software int */
697 if (is_int && dpl < cpl)
698 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
699 /* check valid bit */
700 if (!(e2 & DESC_P_MASK))
701 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
702 selector = e1 >> 16;
703 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
704 if ((selector & 0xfffc) == 0)
705 raise_exception_err(EXCP0D_GPF, 0);
707 if (load_segment(&e1, &e2, selector) != 0)
708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
709 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
711 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
712 if (dpl > cpl)
713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
714 if (!(e2 & DESC_P_MASK))
715 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
716 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
717 /* to inner privilege */
718 get_ss_esp_from_tss(&ss, &esp, dpl);
719 if ((ss & 0xfffc) == 0)
720 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721 if ((ss & 3) != dpl)
722 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
723 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
724 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
725 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
726 if (ss_dpl != dpl)
727 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
728 if (!(ss_e2 & DESC_S_MASK) ||
729 (ss_e2 & DESC_CS_MASK) ||
730 !(ss_e2 & DESC_W_MASK))
731 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
732 if (!(ss_e2 & DESC_P_MASK))
733 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
734 new_stack = 1;
735 sp_mask = get_sp_mask(ss_e2);
736 ssp = get_seg_base(ss_e1, ss_e2);
737 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
738 /* to same privilege */
739 if (env->eflags & VM_MASK)
740 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741 new_stack = 0;
742 sp_mask = get_sp_mask(env->segs[R_SS].flags);
743 ssp = env->segs[R_SS].base;
744 esp = ESP;
745 dpl = cpl;
746 } else {
747 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
748 new_stack = 0; /* avoid warning */
749 sp_mask = 0; /* avoid warning */
750 ssp = 0; /* avoid warning */
751 esp = 0; /* avoid warning */
754 shift = type >> 3;
756 #if 0
757 /* XXX: check that enough room is available */
758 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
759 if (env->eflags & VM_MASK)
760 push_size += 8;
761 push_size <<= shift;
762 #endif
763 if (shift == 1) {
764 if (new_stack) {
765 if (env->eflags & VM_MASK) {
766 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
767 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
768 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
769 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
771 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
772 PUSHL(ssp, esp, sp_mask, ESP);
774 PUSHL(ssp, esp, sp_mask, compute_eflags());
775 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
776 PUSHL(ssp, esp, sp_mask, old_eip);
777 if (has_error_code) {
778 PUSHL(ssp, esp, sp_mask, error_code);
780 } else {
781 if (new_stack) {
782 if (env->eflags & VM_MASK) {
783 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
784 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
785 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
786 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
788 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
789 PUSHW(ssp, esp, sp_mask, ESP);
791 PUSHW(ssp, esp, sp_mask, compute_eflags());
792 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
793 PUSHW(ssp, esp, sp_mask, old_eip);
794 if (has_error_code) {
795 PUSHW(ssp, esp, sp_mask, error_code);
799 if (new_stack) {
800 if (env->eflags & VM_MASK) {
801 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
802 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
803 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
804 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
806 ss = (ss & ~3) | dpl;
807 cpu_x86_load_seg_cache(env, R_SS, ss,
808 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
810 SET_ESP(esp, sp_mask);
812 selector = (selector & ~3) | dpl;
813 cpu_x86_load_seg_cache(env, R_CS, selector,
814 get_seg_base(e1, e2),
815 get_seg_limit(e1, e2),
816 e2);
817 cpu_x86_set_cpl(env, dpl);
818 env->eip = offset;
820 /* interrupt gates clear the IF flag */
821 if ((type & 1) == 0) {
822 env->eflags &= ~IF_MASK;
824 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
827 #ifdef TARGET_X86_64
829 #define PUSHQ(sp, val)\
831 sp -= 8;\
832 stq_kernel(sp, (val));\
835 #define POPQ(sp, val)\
837 val = ldq_kernel(sp);\
838 sp += 8;\
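/* In the 64-bit TSS, RSP0/1/2 are stored at byte offsets 4, 12 and 20
   and IST1..IST7 at offsets 36..84, so index = 8 * level + 4 covers both
   cases (callers pass ist + 3 to select an IST slot). */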
841 static inline target_ulong get_rsp_from_tss(int level)
843 int index;
845 #if 0
846 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
847 env->tr.base, env->tr.limit);
848 #endif
850 if (!(env->tr.flags & DESC_P_MASK))
851 cpu_abort(env, "invalid tss");
852 index = 8 * level + 4;
853 if ((index + 7) > env->tr.limit)
854 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
855 return ldq_kernel(env->tr.base + index);
858 /* 64 bit interrupt */
859 static void do_interrupt64(int intno, int is_int, int error_code,
860 target_ulong next_eip, int is_hw)
862 SegmentCache *dt;
863 target_ulong ptr;
864 int type, dpl, selector, cpl, ist;
865 int has_error_code, new_stack;
866 uint32_t e1, e2, e3, ss;
867 target_ulong old_eip, esp, offset;
869 has_error_code = 0;
870 if (!is_int && !is_hw) {
871 switch(intno) {
872 case 8:
873 case 10:
874 case 11:
875 case 12:
876 case 13:
877 case 14:
878 case 17:
879 has_error_code = 1;
880 break;
883 if (is_int)
884 old_eip = next_eip;
885 else
886 old_eip = env->eip;
888 dt = &env->idt;
889 if (intno * 16 + 15 > dt->limit)
890 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
891 ptr = dt->base + intno * 16;
892 e1 = ldl_kernel(ptr);
893 e2 = ldl_kernel(ptr + 4);
894 e3 = ldl_kernel(ptr + 8);
895 /* check gate type */
896 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
897 switch(type) {
898 case 14: /* 386 interrupt gate */
899 case 15: /* 386 trap gate */
900 break;
901 default:
902 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
903 break;
905 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
906 cpl = env->hflags & HF_CPL_MASK;
907 /* check privilege if software int */
908 if (is_int && dpl < cpl)
909 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910 /* check valid bit */
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
913 selector = e1 >> 16;
914 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
915 ist = e2 & 7;
916 if ((selector & 0xfffc) == 0)
917 raise_exception_err(EXCP0D_GPF, 0);
919 if (load_segment(&e1, &e2, selector) != 0)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
924 if (dpl > cpl)
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 if (!(e2 & DESC_P_MASK))
927 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
928 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
930 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
931 /* to inner privilege */
932 if (ist != 0)
933 esp = get_rsp_from_tss(ist + 3);
934 else
935 esp = get_rsp_from_tss(dpl);
936 esp &= ~0xfLL; /* align stack */
937 ss = 0;
938 new_stack = 1;
939 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
940 /* to same privilege */
941 if (env->eflags & VM_MASK)
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 new_stack = 0;
944 if (ist != 0)
945 esp = get_rsp_from_tss(ist + 3);
946 else
947 esp = ESP;
948 esp &= ~0xfLL; /* align stack */
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 esp = 0; /* avoid warning */
956 PUSHQ(esp, env->segs[R_SS].selector);
957 PUSHQ(esp, ESP);
958 PUSHQ(esp, compute_eflags());
959 PUSHQ(esp, env->segs[R_CS].selector);
960 PUSHQ(esp, old_eip);
961 if (has_error_code) {
962 PUSHQ(esp, error_code);
965 if (new_stack) {
966 ss = 0 | dpl;
967 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
969 ESP = esp;
971 selector = (selector & ~3) | dpl;
972 cpu_x86_load_seg_cache(env, R_CS, selector,
973 get_seg_base(e1, e2),
974 get_seg_limit(e1, e2),
975 e2);
976 cpu_x86_set_cpl(env, dpl);
977 env->eip = offset;
979 /* interrupt gates clear the IF flag */
980 if ((type & 1) == 0) {
981 env->eflags &= ~IF_MASK;
983 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
985 #endif
987 #if defined(CONFIG_USER_ONLY)
988 void helper_syscall(int next_eip_addend)
990 env->exception_index = EXCP_SYSCALL;
991 env->exception_next_eip = env->eip + next_eip_addend;
992 cpu_loop_exit();
994 #else
995 void helper_syscall(int next_eip_addend)
997 int selector;
999 if (!(env->efer & MSR_EFER_SCE)) {
1000 raise_exception_err(EXCP06_ILLOP, 0);
1002 selector = (env->star >> 32) & 0xffff;
1003 #ifdef TARGET_X86_64
1004 if (env->hflags & HF_LMA_MASK) {
1005 int code64;
1007 ECX = env->eip + next_eip_addend;
1008 env->regs[11] = compute_eflags();
1010 code64 = env->hflags & HF_CS64_MASK;
1012 cpu_x86_set_cpl(env, 0);
1013 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1014 0, 0xffffffff,
1015 DESC_G_MASK | DESC_P_MASK |
1016 DESC_S_MASK |
1017 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1018 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1019 0, 0xffffffff,
1020 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1021 DESC_S_MASK |
1022 DESC_W_MASK | DESC_A_MASK);
1023 env->eflags &= ~env->fmask;
1024 load_eflags(env->eflags, 0);
1025 if (code64)
1026 env->eip = env->lstar;
1027 else
1028 env->eip = env->cstar;
1029 } else
1030 #endif
1032 ECX = (uint32_t)(env->eip + next_eip_addend);
1034 cpu_x86_set_cpl(env, 0);
1035 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1036 0, 0xffffffff,
1037 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1038 DESC_S_MASK |
1039 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1040 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1041 0, 0xffffffff,
1042 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1043 DESC_S_MASK |
1044 DESC_W_MASK | DESC_A_MASK);
1045 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1046 env->eip = (uint32_t)env->star;
1049 #endif
1051 void helper_sysret(int dflag)
1053 int cpl, selector;
1055 if (!(env->efer & MSR_EFER_SCE)) {
1056 raise_exception_err(EXCP06_ILLOP, 0);
1058 cpl = env->hflags & HF_CPL_MASK;
1059 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1060 raise_exception_err(EXCP0D_GPF, 0);
1062 selector = (env->star >> 48) & 0xffff;
1063 #ifdef TARGET_X86_64
1064 if (env->hflags & HF_LMA_MASK) {
1065 if (dflag == 2) {
1066 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1067 0, 0xffffffff,
1068 DESC_G_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1071 DESC_L_MASK);
1072 env->eip = ECX;
1073 } else {
1074 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1075 0, 0xffffffff,
1076 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1077 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1079 env->eip = (uint32_t)ECX;
1081 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1082 0, 0xffffffff,
1083 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1084 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1085 DESC_W_MASK | DESC_A_MASK);
1086 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1087 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1088 cpu_x86_set_cpl(env, 3);
1089 } else
1090 #endif
1092 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1093 0, 0xffffffff,
1094 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097 env->eip = (uint32_t)ECX;
1098 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1099 0, 0xffffffff,
1100 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1101 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1102 DESC_W_MASK | DESC_A_MASK);
1103 env->eflags |= IF_MASK;
1104 cpu_x86_set_cpl(env, 3);
1106 #ifdef USE_KQEMU
1107 if (kqemu_is_ok(env)) {
1108 if (env->hflags & HF_LMA_MASK)
1109 CC_OP = CC_OP_EFLAGS;
1110 env->exception_index = -1;
1111 cpu_loop_exit();
1113 #endif
1116 /* real mode interrupt */
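/* The real-mode IVT entry at intno * 4 holds the new IP in its first
   word and the new CS in the second; FLAGS, CS and IP are pushed on the
   16-bit stack and IF/TF/AC/RF are cleared. */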
1117 static void do_interrupt_real(int intno, int is_int, int error_code,
1118 unsigned int next_eip)
1120 SegmentCache *dt;
1121 target_ulong ptr, ssp;
1122 int selector;
1123 uint32_t offset, esp;
1124 uint32_t old_cs, old_eip;
1126 /* real mode (simpler!) */
1127 dt = &env->idt;
1128 if (intno * 4 + 3 > dt->limit)
1129 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1130 ptr = dt->base + intno * 4;
1131 offset = lduw_kernel(ptr);
1132 selector = lduw_kernel(ptr + 2);
1133 esp = ESP;
1134 ssp = env->segs[R_SS].base;
1135 if (is_int)
1136 old_eip = next_eip;
1137 else
1138 old_eip = env->eip;
1139 old_cs = env->segs[R_CS].selector;
1140 /* XXX: use SS segment size ? */
1141 PUSHW(ssp, esp, 0xffff, compute_eflags());
1142 PUSHW(ssp, esp, 0xffff, old_cs);
1143 PUSHW(ssp, esp, 0xffff, old_eip);
1145 /* update processor state */
1146 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1147 env->eip = offset;
1148 env->segs[R_CS].selector = selector;
1149 env->segs[R_CS].base = (selector << 4);
1150 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1153 /* fake user mode interrupt */
1154 void do_interrupt_user(int intno, int is_int, int error_code,
1155 target_ulong next_eip)
1157 SegmentCache *dt;
1158 target_ulong ptr;
1159 int dpl, cpl, shift;
1160 uint32_t e2;
1162 dt = &env->idt;
1163 if (env->hflags & HF_LMA_MASK) {
1164 shift = 4;
1165 } else {
1166 shift = 3;
1168 ptr = dt->base + (intno << shift);
1169 e2 = ldl_kernel(ptr + 4);
1171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1172 cpl = env->hflags & HF_CPL_MASK;
1173 /* check privilege if software int */
1174 if (is_int && dpl < cpl)
1175 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1177 /* Since we emulate only user space, we cannot do more than
1178 exit the emulation with the suitable exception and error
1179 code */
1180 if (is_int)
1181 EIP = next_eip;
1185 * Begin execution of an interrupt. is_int is TRUE if coming from
1186 * the int instruction. next_eip is the EIP value AFTER the interrupt
1187 * instruction. It is only relevant if is_int is TRUE.
1189 void do_interrupt(int intno, int is_int, int error_code,
1190 target_ulong next_eip, int is_hw)
1192 if (loglevel & CPU_LOG_INT) {
1193 if ((env->cr[0] & CR0_PE_MASK)) {
1194 static int count;
1195 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1196 count, intno, error_code, is_int,
1197 env->hflags & HF_CPL_MASK,
1198 env->segs[R_CS].selector, EIP,
1199 (int)env->segs[R_CS].base + EIP,
1200 env->segs[R_SS].selector, ESP);
1201 if (intno == 0x0e) {
1202 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1203 } else {
1204 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1206 fprintf(logfile, "\n");
1207 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1208 #if 0
1210 int i;
1211 uint8_t *ptr;
1212 fprintf(logfile, " code=");
1213 ptr = env->segs[R_CS].base + env->eip;
1214 for(i = 0; i < 16; i++) {
1215 fprintf(logfile, " %02x", ldub(ptr + i));
1217 fprintf(logfile, "\n");
1219 #endif
1220 count++;
1223 if (env->cr[0] & CR0_PE_MASK) {
1224 #ifdef TARGET_X86_64
1225 if (env->hflags & HF_LMA_MASK) {
1226 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1227 } else
1228 #endif
1230 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1232 } else {
1233 do_interrupt_real(intno, is_int, error_code, next_eip);
1238 * Check nested exceptions and change to double or triple fault if
1239 * needed. It should only be called if this is not an interrupt.
1240 * Returns the new exception number.
1242 static int check_exception(int intno, int *error_code)
1244 int first_contributory = env->old_exception == 0 ||
1245 (env->old_exception >= 10 &&
1246 env->old_exception <= 13);
1247 int second_contributory = intno == 0 ||
1248 (intno >= 10 && intno <= 13);
1250 if (loglevel & CPU_LOG_INT)
1251 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1252 env->old_exception, intno);
1254 if (env->old_exception == EXCP08_DBLE)
1255 cpu_abort(env, "triple fault");
1257 if ((first_contributory && second_contributory)
1258 || (env->old_exception == EXCP0E_PAGE &&
1259 (second_contributory || (intno == EXCP0E_PAGE)))) {
1260 intno = EXCP08_DBLE;
1261 *error_code = 0;
1264 if (second_contributory || (intno == EXCP0E_PAGE) ||
1265 (intno == EXCP08_DBLE))
1266 env->old_exception = intno;
1268 return intno;
1272 * Signal an interrupt. It is executed in the main CPU loop.
1273 * is_int is TRUE if coming from the int instruction. next_eip is the
1274 * EIP value AFTER the interrupt instruction. It is only relevant if
1275 * is_int is TRUE.
1277 void raise_interrupt(int intno, int is_int, int error_code,
1278 int next_eip_addend)
1280 if (!is_int) {
1281 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1282 intno = check_exception(intno, &error_code);
1283 } else {
1284 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1287 env->exception_index = intno;
1288 env->error_code = error_code;
1289 env->exception_is_int = is_int;
1290 env->exception_next_eip = env->eip + next_eip_addend;
1291 cpu_loop_exit();
1294 /* shortcuts to generate exceptions */
1296 void (raise_exception_err)(int exception_index, int error_code)
1298 raise_interrupt(exception_index, 0, error_code, 0);
1301 void raise_exception(int exception_index)
1303 raise_interrupt(exception_index, 0, 0, 0);
1306 /* SMM support */
1308 #if defined(CONFIG_USER_ONLY)
1310 void do_smm_enter(void)
1314 void helper_rsm(void)
1318 #else
1320 #ifdef TARGET_X86_64
1321 #define SMM_REVISION_ID 0x00020064
1322 #else
1323 #define SMM_REVISION_ID 0x00020000
1324 #endif
1326 void do_smm_enter(void)
1328 target_ulong sm_state;
1329 SegmentCache *dt;
1330 int i, offset;
1332 if (loglevel & CPU_LOG_INT) {
1333 fprintf(logfile, "SMM: enter\n");
1334 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1337 env->hflags |= HF_SMM_MASK;
1338 cpu_smm_update(env);
1340 sm_state = env->smbase + 0x8000;
1342 #ifdef TARGET_X86_64
1343 for(i = 0; i < 6; i++) {
1344 dt = &env->segs[i];
1345 offset = 0x7e00 + i * 16;
1346 stw_phys(sm_state + offset, dt->selector);
1347 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1348 stl_phys(sm_state + offset + 4, dt->limit);
1349 stq_phys(sm_state + offset + 8, dt->base);
1352 stq_phys(sm_state + 0x7e68, env->gdt.base);
1353 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1355 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1356 stq_phys(sm_state + 0x7e78, env->ldt.base);
1357 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1358 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1360 stq_phys(sm_state + 0x7e88, env->idt.base);
1361 stl_phys(sm_state + 0x7e84, env->idt.limit);
1363 stw_phys(sm_state + 0x7e90, env->tr.selector);
1364 stq_phys(sm_state + 0x7e98, env->tr.base);
1365 stl_phys(sm_state + 0x7e94, env->tr.limit);
1366 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1368 stq_phys(sm_state + 0x7ed0, env->efer);
1370 stq_phys(sm_state + 0x7ff8, EAX);
1371 stq_phys(sm_state + 0x7ff0, ECX);
1372 stq_phys(sm_state + 0x7fe8, EDX);
1373 stq_phys(sm_state + 0x7fe0, EBX);
1374 stq_phys(sm_state + 0x7fd8, ESP);
1375 stq_phys(sm_state + 0x7fd0, EBP);
1376 stq_phys(sm_state + 0x7fc8, ESI);
1377 stq_phys(sm_state + 0x7fc0, EDI);
1378 for(i = 8; i < 16; i++)
1379 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1380 stq_phys(sm_state + 0x7f78, env->eip);
1381 stl_phys(sm_state + 0x7f70, compute_eflags());
1382 stl_phys(sm_state + 0x7f68, env->dr[6]);
1383 stl_phys(sm_state + 0x7f60, env->dr[7]);
1385 stl_phys(sm_state + 0x7f48, env->cr[4]);
1386 stl_phys(sm_state + 0x7f50, env->cr[3]);
1387 stl_phys(sm_state + 0x7f58, env->cr[0]);
1389 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1390 stl_phys(sm_state + 0x7f00, env->smbase);
1391 #else
1392 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1393 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1394 stl_phys(sm_state + 0x7ff4, compute_eflags());
1395 stl_phys(sm_state + 0x7ff0, env->eip);
1396 stl_phys(sm_state + 0x7fec, EDI);
1397 stl_phys(sm_state + 0x7fe8, ESI);
1398 stl_phys(sm_state + 0x7fe4, EBP);
1399 stl_phys(sm_state + 0x7fe0, ESP);
1400 stl_phys(sm_state + 0x7fdc, EBX);
1401 stl_phys(sm_state + 0x7fd8, EDX);
1402 stl_phys(sm_state + 0x7fd4, ECX);
1403 stl_phys(sm_state + 0x7fd0, EAX);
1404 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1405 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1407 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1408 stl_phys(sm_state + 0x7f64, env->tr.base);
1409 stl_phys(sm_state + 0x7f60, env->tr.limit);
1410 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1412 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1413 stl_phys(sm_state + 0x7f80, env->ldt.base);
1414 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1415 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1417 stl_phys(sm_state + 0x7f74, env->gdt.base);
1418 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1420 stl_phys(sm_state + 0x7f58, env->idt.base);
1421 stl_phys(sm_state + 0x7f54, env->idt.limit);
1423 for(i = 0; i < 6; i++) {
1424 dt = &env->segs[i];
1425 if (i < 3)
1426 offset = 0x7f84 + i * 12;
1427 else
1428 offset = 0x7f2c + (i - 3) * 12;
1429 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1430 stl_phys(sm_state + offset + 8, dt->base);
1431 stl_phys(sm_state + offset + 4, dt->limit);
1432 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1434 stl_phys(sm_state + 0x7f14, env->cr[4]);
1436 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1437 stl_phys(sm_state + 0x7ef8, env->smbase);
1438 #endif
1439 /* init SMM cpu state */
1441 #ifdef TARGET_X86_64
1442 cpu_load_efer(env, 0);
1443 #endif
1444 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1445 env->eip = 0x00008000;
1446 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1447 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1449 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1450 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1451 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1452 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1454 cpu_x86_update_cr0(env,
1455 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1456 cpu_x86_update_cr4(env, 0);
1457 env->dr[7] = 0x00000400;
1458 CC_OP = CC_OP_EFLAGS;
1461 void helper_rsm(void)
1463 target_ulong sm_state;
1464 int i, offset;
1465 uint32_t val;
1467 sm_state = env->smbase + 0x8000;
1468 #ifdef TARGET_X86_64
1469 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1471 for(i = 0; i < 6; i++) {
1472 offset = 0x7e00 + i * 16;
1473 cpu_x86_load_seg_cache(env, i,
1474 lduw_phys(sm_state + offset),
1475 ldq_phys(sm_state + offset + 8),
1476 ldl_phys(sm_state + offset + 4),
1477 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1480 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1481 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1483 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1484 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1485 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1486 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1488 env->idt.base = ldq_phys(sm_state + 0x7e88);
1489 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1491 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1492 env->tr.base = ldq_phys(sm_state + 0x7e98);
1493 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1494 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1496 EAX = ldq_phys(sm_state + 0x7ff8);
1497 ECX = ldq_phys(sm_state + 0x7ff0);
1498 EDX = ldq_phys(sm_state + 0x7fe8);
1499 EBX = ldq_phys(sm_state + 0x7fe0);
1500 ESP = ldq_phys(sm_state + 0x7fd8);
1501 EBP = ldq_phys(sm_state + 0x7fd0);
1502 ESI = ldq_phys(sm_state + 0x7fc8);
1503 EDI = ldq_phys(sm_state + 0x7fc0);
1504 for(i = 8; i < 16; i++)
1505 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1506 env->eip = ldq_phys(sm_state + 0x7f78);
1507 load_eflags(ldl_phys(sm_state + 0x7f70),
1508 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1509 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1510 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1512 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1513 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1514 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1516 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1517 if (val & 0x20000) {
1518 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1520 #else
1521 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1522 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1523 load_eflags(ldl_phys(sm_state + 0x7ff4),
1524 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1525 env->eip = ldl_phys(sm_state + 0x7ff0);
1526 EDI = ldl_phys(sm_state + 0x7fec);
1527 ESI = ldl_phys(sm_state + 0x7fe8);
1528 EBP = ldl_phys(sm_state + 0x7fe4);
1529 ESP = ldl_phys(sm_state + 0x7fe0);
1530 EBX = ldl_phys(sm_state + 0x7fdc);
1531 EDX = ldl_phys(sm_state + 0x7fd8);
1532 ECX = ldl_phys(sm_state + 0x7fd4);
1533 EAX = ldl_phys(sm_state + 0x7fd0);
1534 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1535 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1537 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1538 env->tr.base = ldl_phys(sm_state + 0x7f64);
1539 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1540 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1542 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1543 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1544 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1545 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1547 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1548 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1550 env->idt.base = ldl_phys(sm_state + 0x7f58);
1551 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1553 for(i = 0; i < 6; i++) {
1554 if (i < 3)
1555 offset = 0x7f84 + i * 12;
1556 else
1557 offset = 0x7f2c + (i - 3) * 12;
1558 cpu_x86_load_seg_cache(env, i,
1559 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1560 ldl_phys(sm_state + offset + 8),
1561 ldl_phys(sm_state + offset + 4),
1562 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1564 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1566 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1567 if (val & 0x20000) {
1568 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1570 #endif
1571 CC_OP = CC_OP_EFLAGS;
1572 env->hflags &= ~HF_SMM_MASK;
1573 cpu_smm_update(env);
1575 if (loglevel & CPU_LOG_INT) {
1576 fprintf(logfile, "SMM: after RSM\n");
1577 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1581 #endif /* !CONFIG_USER_ONLY */
1584 /* division, flags are undefined */
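/* DIV/IDIV helpers: the double-width dividend (AX, DX:AX or EDX:EAX) is
   divided by the operand; the quotient goes to AL/AX/EAX and the
   remainder to AH/DX/EDX.  A zero divisor or a quotient that does not
   fit raises #DE (EXCP00_DIVZ). */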
1586 void helper_divb_AL(target_ulong t0)
1588 unsigned int num, den, q, r;
1590 num = (EAX & 0xffff);
1591 den = (t0 & 0xff);
1592 if (den == 0) {
1593 raise_exception(EXCP00_DIVZ);
1595 q = (num / den);
1596 if (q > 0xff)
1597 raise_exception(EXCP00_DIVZ);
1598 q &= 0xff;
1599 r = (num % den) & 0xff;
1600 EAX = (EAX & ~0xffff) | (r << 8) | q;
1603 void helper_idivb_AL(target_ulong t0)
1605 int num, den, q, r;
1607 num = (int16_t)EAX;
1608 den = (int8_t)t0;
1609 if (den == 0) {
1610 raise_exception(EXCP00_DIVZ);
1612 q = (num / den);
1613 if (q != (int8_t)q)
1614 raise_exception(EXCP00_DIVZ);
1615 q &= 0xff;
1616 r = (num % den) & 0xff;
1617 EAX = (EAX & ~0xffff) | (r << 8) | q;
1620 void helper_divw_AX(target_ulong t0)
1622 unsigned int num, den, q, r;
1624 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1625 den = (t0 & 0xffff);
1626 if (den == 0) {
1627 raise_exception(EXCP00_DIVZ);
1629 q = (num / den);
1630 if (q > 0xffff)
1631 raise_exception(EXCP00_DIVZ);
1632 q &= 0xffff;
1633 r = (num % den) & 0xffff;
1634 EAX = (EAX & ~0xffff) | q;
1635 EDX = (EDX & ~0xffff) | r;
1638 void helper_idivw_AX(target_ulong t0)
1640 int num, den, q, r;
1642 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1643 den = (int16_t)t0;
1644 if (den == 0) {
1645 raise_exception(EXCP00_DIVZ);
1647 q = (num / den);
1648 if (q != (int16_t)q)
1649 raise_exception(EXCP00_DIVZ);
1650 q &= 0xffff;
1651 r = (num % den) & 0xffff;
1652 EAX = (EAX & ~0xffff) | q;
1653 EDX = (EDX & ~0xffff) | r;
1656 void helper_divl_EAX(target_ulong t0)
1658 unsigned int den, r;
1659 uint64_t num, q;
1661 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1662 den = t0;
1663 if (den == 0) {
1664 raise_exception(EXCP00_DIVZ);
1666 q = (num / den);
1667 r = (num % den);
1668 if (q > 0xffffffff)
1669 raise_exception(EXCP00_DIVZ);
1670 EAX = (uint32_t)q;
1671 EDX = (uint32_t)r;
1674 void helper_idivl_EAX(target_ulong t0)
1676 int den, r;
1677 int64_t num, q;
1679 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1680 den = t0;
1681 if (den == 0) {
1682 raise_exception(EXCP00_DIVZ);
1684 q = (num / den);
1685 r = (num % den);
1686 if (q != (int32_t)q)
1687 raise_exception(EXCP00_DIVZ);
1688 EAX = (uint32_t)q;
1689 EDX = (uint32_t)r;
1692 /* bcd */
1694 /* XXX: exception */
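/* AAM: AH = AL / base, AL = AL % base, with base == 10 for the plain
   opcode; real hardware raises #DE when the immediate base is 0, which
   appears to be the missing exception noted above.  AAD is the inverse:
   AL = (AH * base + AL) & 0xff, AH = 0. */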
1695 void helper_aam(int base)
1697 int al, ah;
1698 al = EAX & 0xff;
1699 ah = al / base;
1700 al = al % base;
1701 EAX = (EAX & ~0xffff) | al | (ah << 8);
1702 CC_DST = al;
1705 void helper_aad(int base)
1707 int al, ah;
1708 al = EAX & 0xff;
1709 ah = (EAX >> 8) & 0xff;
1710 al = ((ah * base) + al) & 0xff;
1711 EAX = (EAX & ~0xffff) | al;
1712 CC_DST = al;
1715 void helper_aaa(void)
1717 int icarry;
1718 int al, ah, af;
1719 int eflags;
1721 eflags = cc_table[CC_OP].compute_all();
1722 af = eflags & CC_A;
1723 al = EAX & 0xff;
1724 ah = (EAX >> 8) & 0xff;
1726 icarry = (al > 0xf9);
1727 if (((al & 0x0f) > 9 ) || af) {
1728 al = (al + 6) & 0x0f;
1729 ah = (ah + 1 + icarry) & 0xff;
1730 eflags |= CC_C | CC_A;
1731 } else {
1732 eflags &= ~(CC_C | CC_A);
1733 al &= 0x0f;
1735 EAX = (EAX & ~0xffff) | al | (ah << 8);
1736 CC_SRC = eflags;
1737 FORCE_RET();
1740 void helper_aas(void)
1742 int icarry;
1743 int al, ah, af;
1744 int eflags;
1746 eflags = cc_table[CC_OP].compute_all();
1747 af = eflags & CC_A;
1748 al = EAX & 0xff;
1749 ah = (EAX >> 8) & 0xff;
1751 icarry = (al < 6);
1752 if (((al & 0x0f) > 9 ) || af) {
1753 al = (al - 6) & 0x0f;
1754 ah = (ah - 1 - icarry) & 0xff;
1755 eflags |= CC_C | CC_A;
1756 } else {
1757 eflags &= ~(CC_C | CC_A);
1758 al &= 0x0f;
1760 EAX = (EAX & ~0xffff) | al | (ah << 8);
1761 CC_SRC = eflags;
1762 FORCE_RET();
1765 void helper_daa(void)
1767 int al, af, cf;
1768 int eflags;
1770 eflags = cc_table[CC_OP].compute_all();
1771 cf = eflags & CC_C;
1772 af = eflags & CC_A;
1773 al = EAX & 0xff;
1775 eflags = 0;
1776 if (((al & 0x0f) > 9 ) || af) {
1777 al = (al + 6) & 0xff;
1778 eflags |= CC_A;
1780 if ((al > 0x9f) || cf) {
1781 al = (al + 0x60) & 0xff;
1782 eflags |= CC_C;
1784 EAX = (EAX & ~0xff) | al;
1785 /* well, speed is not an issue here, so we compute the flags by hand */
1786 eflags |= (al == 0) << 6; /* zf */
1787 eflags |= parity_table[al]; /* pf */
1788 eflags |= (al & 0x80); /* sf */
1789 CC_SRC = eflags;
1790 FORCE_RET();
1793 void helper_das(void)
1795 int al, al1, af, cf;
1796 int eflags;
1798 eflags = cc_table[CC_OP].compute_all();
1799 cf = eflags & CC_C;
1800 af = eflags & CC_A;
1801 al = EAX & 0xff;
1803 eflags = 0;
1804 al1 = al;
1805 if (((al & 0x0f) > 9 ) || af) {
1806 eflags |= CC_A;
1807 if (al < 6 || cf)
1808 eflags |= CC_C;
1809 al = (al - 6) & 0xff;
1811 if ((al1 > 0x99) || cf) {
1812 al = (al - 0x60) & 0xff;
1813 eflags |= CC_C;
1815 EAX = (EAX & ~0xff) | al;
1816 /* well, speed is not an issue here, so we compute the flags by hand */
1817 eflags |= (al == 0) << 6; /* zf */
1818 eflags |= parity_table[al]; /* pf */
1819 eflags |= (al & 0x80); /* sf */
1820 CC_SRC = eflags;
1821 FORCE_RET();
1824 void helper_into(int next_eip_addend)
1826 int eflags;
1827 eflags = cc_table[CC_OP].compute_all();
1828 if (eflags & CC_O) {
1829 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
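/* CMPXCHG8B: compare the 64-bit memory operand with EDX:EAX.  If they
   are equal, ECX:EBX is stored and ZF is set; otherwise the memory value
   is loaded into EDX:EAX and ZF is cleared.  The store is performed in
   both cases to match the write access of the real instruction. */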
1833 void helper_cmpxchg8b(target_ulong a0)
1835 uint64_t d;
1836 int eflags;
1838 eflags = cc_table[CC_OP].compute_all();
1839 d = ldq(a0);
1840 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1841 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1842 eflags |= CC_Z;
1843 } else {
1844 /* always do the store */
1845 stq(a0, d);
1846 EDX = (uint32_t)(d >> 32);
1847 EAX = (uint32_t)d;
1848 eflags &= ~CC_Z;
1850 CC_SRC = eflags;
1853 #ifdef TARGET_X86_64
1854 void helper_cmpxchg16b(target_ulong a0)
1856 uint64_t d0, d1;
1857 int eflags;
1859 if ((a0 & 0xf) != 0)
1860 raise_exception(EXCP0D_GPF);
1861 eflags = cc_table[CC_OP].compute_all();
1862 d0 = ldq(a0);
1863 d1 = ldq(a0 + 8);
1864 if (d0 == EAX && d1 == EDX) {
1865 stq(a0, EBX);
1866 stq(a0 + 8, ECX);
1867 eflags |= CC_Z;
1868 } else {
1869 /* always do the store */
1870 stq(a0, d0);
1871 stq(a0 + 8, d1);
1872 EDX = d1;
1873 EAX = d0;
1874 eflags &= ~CC_Z;
1876 CC_SRC = eflags;
1878 #endif
1880 void helper_single_step(void)
1882 env->dr[6] |= 0x4000;
1883 raise_exception(EXCP01_SSTP);
1886 void helper_cpuid(void)
1888 uint32_t index;
1890 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1892 index = (uint32_t)EAX;
1893 /* test if maximum index reached */
1894 if (index & 0x80000000) {
1895 if (index > env->cpuid_xlevel)
1896 index = env->cpuid_level;
1897 } else {
1898 if (index > env->cpuid_level)
1899 index = env->cpuid_level;
1902 switch(index) {
1903 case 0:
1904 EAX = env->cpuid_level;
1905 EBX = env->cpuid_vendor1;
1906 EDX = env->cpuid_vendor2;
1907 ECX = env->cpuid_vendor3;
1908 break;
1909 case 1:
1910 EAX = env->cpuid_version;
1911 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1912 ECX = env->cpuid_ext_features;
1913 EDX = env->cpuid_features;
1914 break;
1915 case 2:
1916 /* cache info: needed for Pentium Pro compatibility */
1917 EAX = 1;
1918 EBX = 0;
1919 ECX = 0;
1920 EDX = 0x2c307d;
1921 break;
1922 case 0x80000000:
1923 EAX = env->cpuid_xlevel;
1924 EBX = env->cpuid_vendor1;
1925 EDX = env->cpuid_vendor2;
1926 ECX = env->cpuid_vendor3;
1927 break;
1928 case 0x80000001:
1929 EAX = env->cpuid_features;
1930 EBX = 0;
1931 ECX = env->cpuid_ext3_features;
1932 EDX = env->cpuid_ext2_features;
1933 break;
1934 case 0x80000002:
1935 case 0x80000003:
1936 case 0x80000004:
1937 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1938 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1939 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1940 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1941 break;
1942 case 0x80000005:
1943 /* cache info (L1 cache) */
1944 EAX = 0x01ff01ff;
1945 EBX = 0x01ff01ff;
1946 ECX = 0x40020140;
1947 EDX = 0x40020140;
1948 break;
1949 case 0x80000006:
1950 /* cache info (L2 cache) */
1951 EAX = 0;
1952 EBX = 0x42004200;
1953 ECX = 0x02008140;
1954 EDX = 0;
1955 break;
1956 case 0x80000008:
1957 /* virtual & phys address size in low 2 bytes. */
1958 /* XXX: This value must match the one used in the MMU code. */
1959 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1960 /* 64 bit processor */
1961 #if defined(USE_KQEMU)
1962 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1963 #else
1964 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1965 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1966 #endif
1967 } else {
1968 #if defined(USE_KQEMU)
1969 EAX = 0x00000020; /* 32 bits physical */
1970 #else
1971 EAX = 0x00000024; /* 36 bits physical */
1972 #endif
1974 EBX = 0;
1975 ECX = 0;
1976 EDX = 0;
1977 break;
1978 case 0x8000000A:
1979 EAX = 0x00000001;
1980 EBX = 0;
1981 ECX = 0;
1982 EDX = 0;
1983 break;
1984 default:
1985 /* reserved values: zero */
1986 EAX = 0;
1987 EBX = 0;
1988 ECX = 0;
1989 EDX = 0;
1990 break;
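/* ENTER with a non-zero nesting level: copy level - 1 saved frame
   pointers from the old frame onto the new stack, then push the new
   frame pointer (t1).  Only the display copy is done here; the ESP/EBP
   updates themselves appear to be handled by the translated code, since
   this helper only performs stores. */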
1994 void helper_enter_level(int level, int data32, target_ulong t1)
1996 target_ulong ssp;
1997 uint32_t esp_mask, esp, ebp;
1999 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2000 ssp = env->segs[R_SS].base;
2001 ebp = EBP;
2002 esp = ESP;
2003 if (data32) {
2004 /* 32 bit */
2005 esp -= 4;
2006 while (--level) {
2007 esp -= 4;
2008 ebp -= 4;
2009 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2011 esp -= 4;
2012 stl(ssp + (esp & esp_mask), t1);
2013 } else {
2014 /* 16 bit */
2015 esp -= 2;
2016 while (--level) {
2017 esp -= 2;
2018 ebp -= 2;
2019 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2021 esp -= 2;
2022 stw(ssp + (esp & esp_mask), t1);
2026 #ifdef TARGET_X86_64
2027 void helper_enter64_level(int level, int data64, target_ulong t1)
2029 target_ulong esp, ebp;
2030 ebp = EBP;
2031 esp = ESP;
2033 if (data64) {
2034 /* 64 bit */
2035 esp -= 8;
2036 while (--level) {
2037 esp -= 8;
2038 ebp -= 8;
2039 stq(esp, ldq(ebp));
2041 esp -= 8;
2042 stq(esp, t1);
2043 } else {
2044 /* 16 bit */
2045 esp -= 2;
2046 while (--level) {
2047 esp -= 2;
2048 ebp -= 2;
2049 stw(esp, lduw(ebp));
2051 esp -= 2;
2052 stw(esp, t1);
2055 #endif
2057 void helper_lldt(int selector)
2059 SegmentCache *dt;
2060 uint32_t e1, e2;
2061 int index, entry_limit;
2062 target_ulong ptr;
2064 selector &= 0xffff;
2065 if ((selector & 0xfffc) == 0) {
2066 /* XXX: NULL selector case: invalid LDT */
2067 env->ldt.base = 0;
2068 env->ldt.limit = 0;
2069 } else {
2070 if (selector & 0x4)
2071 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2072 dt = &env->gdt;
2073 index = selector & ~7;
2074 #ifdef TARGET_X86_64
2075 if (env->hflags & HF_LMA_MASK)
2076 entry_limit = 15;
2077 else
2078 #endif
2079 entry_limit = 7;
2080 if ((index + entry_limit) > dt->limit)
2081 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2082 ptr = dt->base + index;
2083 e1 = ldl_kernel(ptr);
2084 e2 = ldl_kernel(ptr + 4);
2085 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2086 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2087 if (!(e2 & DESC_P_MASK))
2088 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2089 #ifdef TARGET_X86_64
2090 if (env->hflags & HF_LMA_MASK) {
2091 uint32_t e3;
2092 e3 = ldl_kernel(ptr + 8);
2093 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2094 env->ldt.base |= (target_ulong)e3 << 32;
2095 } else
2096 #endif
2098 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2101 env->ldt.selector = selector;
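/* LTR: load the task register.  The descriptor must be an available TSS
   (type 1 or 9) in the GDT and is marked busy once loaded. */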
2104 void helper_ltr(int selector)
2106 SegmentCache *dt;
2107 uint32_t e1, e2;
2108 int index, type, entry_limit;
2109 target_ulong ptr;
2111 selector &= 0xffff;
2112 if ((selector & 0xfffc) == 0) {
2113 /* NULL selector case: invalid TR */
2114 env->tr.base = 0;
2115 env->tr.limit = 0;
2116 env->tr.flags = 0;
2117 } else {
2118 if (selector & 0x4)
2119 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2120 dt = &env->gdt;
2121 index = selector & ~7;
2122 #ifdef TARGET_X86_64
2123 if (env->hflags & HF_LMA_MASK)
2124 entry_limit = 15;
2125 else
2126 #endif
2127 entry_limit = 7;
2128 if ((index + entry_limit) > dt->limit)
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 ptr = dt->base + index;
2131 e1 = ldl_kernel(ptr);
2132 e2 = ldl_kernel(ptr + 4);
2133 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2134 if ((e2 & DESC_S_MASK) ||
2135 (type != 1 && type != 9))
2136 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2137 if (!(e2 & DESC_P_MASK))
2138 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2139 #ifdef TARGET_X86_64
2140 if (env->hflags & HF_LMA_MASK) {
2141 uint32_t e3, e4;
2142 e3 = ldl_kernel(ptr + 8);
2143 e4 = ldl_kernel(ptr + 12);
2144 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2145 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2146 load_seg_cache_raw_dt(&env->tr, e1, e2);
2147 env->tr.base |= (target_ulong)e3 << 32;
2148 } else
2149 #endif
2151 load_seg_cache_raw_dt(&env->tr, e1, e2);
2153 e2 |= DESC_TSS_BUSY_MASK;
2154 stl_kernel(ptr + 4, e2);
2156 env->tr.selector = selector;
2159 /* only works in protected mode, outside VM86; seg_reg must be != R_CS */
2160 void helper_load_seg(int seg_reg, int selector)
2162 uint32_t e1, e2;
2163 int cpl, dpl, rpl;
2164 SegmentCache *dt;
2165 int index;
2166 target_ulong ptr;
2168 selector &= 0xffff;
2169 cpl = env->hflags & HF_CPL_MASK;
2170 if ((selector & 0xfffc) == 0) {
2171 /* null selector case */
2172 if (seg_reg == R_SS
2173 #ifdef TARGET_X86_64
2174 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2175 #endif
2177 raise_exception_err(EXCP0D_GPF, 0);
2178 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2179 } else {
2181 if (selector & 0x4)
2182 dt = &env->ldt;
2183 else
2184 dt = &env->gdt;
2185 index = selector & ~7;
2186 if ((index + 7) > dt->limit)
2187 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2188 ptr = dt->base + index;
2189 e1 = ldl_kernel(ptr);
2190 e2 = ldl_kernel(ptr + 4);
2192 if (!(e2 & DESC_S_MASK))
2193 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2194 rpl = selector & 3;
2195 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2196 if (seg_reg == R_SS) {
2197 /* must be writable segment */
2198 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2199 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2200 if (rpl != cpl || dpl != cpl)
2201 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2202 } else {
2203 /* must be readable segment */
2204 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2205 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2207 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2208 /* if not conforming code, test rights */
2209 if (dpl < cpl || dpl < rpl)
2210 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2214 if (!(e2 & DESC_P_MASK)) {
2215 if (seg_reg == R_SS)
2216 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2217 else
2218 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2221 /* set the access bit if not already set */
2222 if (!(e2 & DESC_A_MASK)) {
2223 e2 |= DESC_A_MASK;
2224 stl_kernel(ptr + 4, e2);
2227 cpu_x86_load_seg_cache(env, seg_reg, selector,
2228 get_seg_base(e1, e2),
2229 get_seg_limit(e1, e2),
2230 e2);
2231 #if 0
2232 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2233 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2234 #endif
2238 /* protected mode jump */
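/* Far JMP through a descriptor: directly to a code segment, via a task
   switch (TSS or task gate), or via a call gate, which only redirects
   CS:EIP and never changes the privilege level. */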
2239 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2240 int next_eip_addend)
2242 int gate_cs, type;
2243 uint32_t e1, e2, cpl, dpl, rpl, limit;
2244 target_ulong next_eip;
2246 if ((new_cs & 0xfffc) == 0)
2247 raise_exception_err(EXCP0D_GPF, 0);
2248 if (load_segment(&e1, &e2, new_cs) != 0)
2249 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2250 cpl = env->hflags & HF_CPL_MASK;
2251 if (e2 & DESC_S_MASK) {
2252 if (!(e2 & DESC_CS_MASK))
2253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2254 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2255 if (e2 & DESC_C_MASK) {
2256 /* conforming code segment */
2257 if (dpl > cpl)
2258 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2259 } else {
2260 /* non conforming code segment */
2261 rpl = new_cs & 3;
2262 if (rpl > cpl)
2263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2264 if (dpl != cpl)
2265 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2267 if (!(e2 & DESC_P_MASK))
2268 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2269 limit = get_seg_limit(e1, e2);
2270 if (new_eip > limit &&
2271 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2272 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2273 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2274 get_seg_base(e1, e2), limit, e2);
2275 EIP = new_eip;
2276 } else {
2277 /* jump to call or task gate */
2278 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2279 rpl = new_cs & 3;
2280 cpl = env->hflags & HF_CPL_MASK;
2281 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2282 switch(type) {
2283 case 1: /* 286 TSS */
2284 case 9: /* 386 TSS */
2285 case 5: /* task gate */
2286 if (dpl < cpl || dpl < rpl)
2287 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2288 next_eip = env->eip + next_eip_addend;
2289 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2290 CC_OP = CC_OP_EFLAGS;
2291 break;
2292 case 4: /* 286 call gate */
2293 case 12: /* 386 call gate */
2294 if ((dpl < cpl) || (dpl < rpl))
2295 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2296 if (!(e2 & DESC_P_MASK))
2297 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2298 gate_cs = e1 >> 16;
2299 new_eip = (e1 & 0xffff);
2300 if (type == 12)
2301 new_eip |= (e2 & 0xffff0000);
2302 if (load_segment(&e1, &e2, gate_cs) != 0)
2303 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2304 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2305 /* must be code segment */
2306 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2307 (DESC_S_MASK | DESC_CS_MASK)))
2308 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2309 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2310 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2311 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2312 if (!(e2 & DESC_P_MASK))
2313 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2314 limit = get_seg_limit(e1, e2);
2315 if (new_eip > limit)
2316 raise_exception_err(EXCP0D_GPF, 0);
2317 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2318 get_seg_base(e1, e2), limit, e2);
2319 EIP = new_eip;
2320 break;
2321 default:
2322 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2323 break;
2328 /* real mode call */
2329 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2330 int shift, int next_eip)
2332 int new_eip;
2333 uint32_t esp, esp_mask;
2334 target_ulong ssp;
2336 new_eip = new_eip1;
2337 esp = ESP;
2338 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2339 ssp = env->segs[R_SS].base;
2340 if (shift) {
2341 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2342 PUSHL(ssp, esp, esp_mask, next_eip);
2343 } else {
2344 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2345 PUSHW(ssp, esp, esp_mask, next_eip);
2348 SET_ESP(esp, esp_mask);
2349 env->eip = new_eip;
2350 env->segs[R_CS].selector = new_cs;
2351 env->segs[R_CS].base = (new_cs << 4);
2354 /* protected mode call */
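/* Far CALL through a descriptor: direct to a code segment, via a task
   switch, or via a call gate.  A call gate leading to a more privileged
   non-conforming segment switches to the inner stack taken from the TSS
   and copies param_count parameters from the caller's stack. */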
2355 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2356 int shift, int next_eip_addend)
2358 int new_stack, i;
2359 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2360 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2361 uint32_t val, limit, old_sp_mask;
2362 target_ulong ssp, old_ssp, next_eip;
2364 next_eip = env->eip + next_eip_addend;
2365 #ifdef DEBUG_PCALL
2366 if (loglevel & CPU_LOG_PCALL) {
2367 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2368 new_cs, (uint32_t)new_eip, shift);
2369 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2371 #endif
2372 if ((new_cs & 0xfffc) == 0)
2373 raise_exception_err(EXCP0D_GPF, 0);
2374 if (load_segment(&e1, &e2, new_cs) != 0)
2375 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2376 cpl = env->hflags & HF_CPL_MASK;
2377 #ifdef DEBUG_PCALL
2378 if (loglevel & CPU_LOG_PCALL) {
2379 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2381 #endif
2382 if (e2 & DESC_S_MASK) {
2383 if (!(e2 & DESC_CS_MASK))
2384 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2385 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2386 if (e2 & DESC_C_MASK) {
2387 /* conforming code segment */
2388 if (dpl > cpl)
2389 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2390 } else {
2391 /* non conforming code segment */
2392 rpl = new_cs & 3;
2393 if (rpl > cpl)
2394 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2395 if (dpl != cpl)
2396 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 if (!(e2 & DESC_P_MASK))
2399 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2401 #ifdef TARGET_X86_64
2402 /* XXX: check 16/32 bit cases in long mode */
2403 if (shift == 2) {
2404 target_ulong rsp;
2405 /* 64 bit case */
2406 rsp = ESP;
2407 PUSHQ(rsp, env->segs[R_CS].selector);
2408 PUSHQ(rsp, next_eip);
2409 /* from this point, not restartable */
2410 ESP = rsp;
2411 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2412 get_seg_base(e1, e2),
2413 get_seg_limit(e1, e2), e2);
2414 EIP = new_eip;
2415 } else
2416 #endif
2418 sp = ESP;
2419 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2420 ssp = env->segs[R_SS].base;
2421 if (shift) {
2422 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2423 PUSHL(ssp, sp, sp_mask, next_eip);
2424 } else {
2425 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2426 PUSHW(ssp, sp, sp_mask, next_eip);
2429 limit = get_seg_limit(e1, e2);
2430 if (new_eip > limit)
2431 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2432 /* from this point, not restartable */
2433 SET_ESP(sp, sp_mask);
2434 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2435 get_seg_base(e1, e2), limit, e2);
2436 EIP = new_eip;
2438 } else {
2439 /* check gate type */
2440 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2441 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2442 rpl = new_cs & 3;
2443 switch(type) {
2444 case 1: /* available 286 TSS */
2445 case 9: /* available 386 TSS */
2446 case 5: /* task gate */
2447 if (dpl < cpl || dpl < rpl)
2448 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2449 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2450 CC_OP = CC_OP_EFLAGS;
2451 return;
2452 case 4: /* 286 call gate */
2453 case 12: /* 386 call gate */
2454 break;
2455 default:
2456 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2457 break;
2459 shift = type >> 3;
2461 if (dpl < cpl || dpl < rpl)
2462 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2463 /* check valid bit */
2464 if (!(e2 & DESC_P_MASK))
2465 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2466 selector = e1 >> 16;
2467 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2468 param_count = e2 & 0x1f;
2469 if ((selector & 0xfffc) == 0)
2470 raise_exception_err(EXCP0D_GPF, 0);
2472 if (load_segment(&e1, &e2, selector) != 0)
2473 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2474 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2475 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2476 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2477 if (dpl > cpl)
2478 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2479 if (!(e2 & DESC_P_MASK))
2480 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2482 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2483 /* to inner privilege */
2484 get_ss_esp_from_tss(&ss, &sp, dpl);
2485 #ifdef DEBUG_PCALL
2486 if (loglevel & CPU_LOG_PCALL)
2487 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2488 ss, sp, param_count, ESP);
2489 #endif
2490 if ((ss & 0xfffc) == 0)
2491 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492 if ((ss & 3) != dpl)
2493 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2494 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2495 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2496 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2497 if (ss_dpl != dpl)
2498 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2499 if (!(ss_e2 & DESC_S_MASK) ||
2500 (ss_e2 & DESC_CS_MASK) ||
2501 !(ss_e2 & DESC_W_MASK))
2502 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2503 if (!(ss_e2 & DESC_P_MASK))
2504 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2506 // push_size = ((param_count * 2) + 8) << shift;
2508 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2509 old_ssp = env->segs[R_SS].base;
2511 sp_mask = get_sp_mask(ss_e2);
2512 ssp = get_seg_base(ss_e1, ss_e2);
2513 if (shift) {
2514 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2515 PUSHL(ssp, sp, sp_mask, ESP);
2516 for(i = param_count - 1; i >= 0; i--) {
2517 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2518 PUSHL(ssp, sp, sp_mask, val);
2520 } else {
2521 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2522 PUSHW(ssp, sp, sp_mask, ESP);
2523 for(i = param_count - 1; i >= 0; i--) {
2524 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2525 PUSHW(ssp, sp, sp_mask, val);
2528 new_stack = 1;
2529 } else {
2530 /* to same privilege */
2531 sp = ESP;
2532 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2533 ssp = env->segs[R_SS].base;
2534 // push_size = (4 << shift);
2535 new_stack = 0;
2538 if (shift) {
2539 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2540 PUSHL(ssp, sp, sp_mask, next_eip);
2541 } else {
2542 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2543 PUSHW(ssp, sp, sp_mask, next_eip);
2546 /* from this point, not restartable */
2548 if (new_stack) {
2549 ss = (ss & ~3) | dpl;
2550 cpu_x86_load_seg_cache(env, R_SS, ss,
2551 ssp,
2552 get_seg_limit(ss_e1, ss_e2),
2553 ss_e2);
2556 selector = (selector & ~3) | dpl;
2557 cpu_x86_load_seg_cache(env, R_CS, selector,
2558 get_seg_base(e1, e2),
2559 get_seg_limit(e1, e2),
2560 e2);
2561 cpu_x86_set_cpl(env, dpl);
2562 SET_ESP(sp, sp_mask);
2563 EIP = offset;
2565 #ifdef USE_KQEMU
2566 if (kqemu_is_ok(env)) {
2567 env->exception_index = -1;
2568 cpu_loop_exit();
2570 #endif
2573 /* real and vm86 mode iret */
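/* Pops IP, CS and FLAGS with a 16-bit stack mask.  In VM86 mode IOPL is
   not writable by IRET, so it is excluded from the EFLAGS update mask. */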
2574 void helper_iret_real(int shift)
2576 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2577 target_ulong ssp;
2578 int eflags_mask;
2580 sp_mask = 0xffff; /* XXX: use SS segment size ? */
2581 sp = ESP;
2582 ssp = env->segs[R_SS].base;
2583 if (shift == 1) {
2584 /* 32 bits */
2585 POPL(ssp, sp, sp_mask, new_eip);
2586 POPL(ssp, sp, sp_mask, new_cs);
2587 new_cs &= 0xffff;
2588 POPL(ssp, sp, sp_mask, new_eflags);
2589 } else {
2590 /* 16 bits */
2591 POPW(ssp, sp, sp_mask, new_eip);
2592 POPW(ssp, sp, sp_mask, new_cs);
2593 POPW(ssp, sp, sp_mask, new_eflags);
2595 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2596 load_seg_vm(R_CS, new_cs);
2597 env->eip = new_eip;
2598 if (env->eflags & VM_MASK)
2599 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2600 else
2601 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2602 if (shift == 0)
2603 eflags_mask &= 0xffff;
2604 load_eflags(new_eflags, eflags_mask);
2605 env->hflags2 &= ~HF2_NMI_MASK;
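/* On return to an outer privilege level, segment registers holding a data
   or non-conforming code segment with DPL below the new CPL are nullified,
   as the hardware does. */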
2608 static inline void validate_seg(int seg_reg, int cpl)
2610 int dpl;
2611 uint32_t e2;
2613 /* XXX: on x86_64, we do not want to nullify FS and GS because
2614 they may still contain a valid base. I would be interested to
2615 know how a real x86_64 CPU behaves */
2616 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2617 (env->segs[seg_reg].selector & 0xfffc) == 0)
2618 return;
2620 e2 = env->segs[seg_reg].flags;
2621 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2622 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2623 /* data or non conforming code segment */
2624 if (dpl < cpl) {
2625 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2630 /* protected mode iret */
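/* Common helper for far RET and IRET (selected by is_iret).  It handles
   16/32/64-bit operand sizes (shift), returns to an outer privilege level
   with a stack switch when the selector RPL is greater than the CPL, and
   IRET back to VM86 when the popped EFLAGS has VM set. */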
2631 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2633 uint32_t new_cs, new_eflags, new_ss;
2634 uint32_t new_es, new_ds, new_fs, new_gs;
2635 uint32_t e1, e2, ss_e1, ss_e2;
2636 int cpl, dpl, rpl, eflags_mask, iopl;
2637 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2639 #ifdef TARGET_X86_64
2640 if (shift == 2)
2641 sp_mask = -1;
2642 else
2643 #endif
2644 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2645 sp = ESP;
2646 ssp = env->segs[R_SS].base;
2647 new_eflags = 0; /* avoid warning */
2648 #ifdef TARGET_X86_64
2649 if (shift == 2) {
2650 POPQ(sp, new_eip);
2651 POPQ(sp, new_cs);
2652 new_cs &= 0xffff;
2653 if (is_iret) {
2654 POPQ(sp, new_eflags);
2656 } else
2657 #endif
2658 if (shift == 1) {
2659 /* 32 bits */
2660 POPL(ssp, sp, sp_mask, new_eip);
2661 POPL(ssp, sp, sp_mask, new_cs);
2662 new_cs &= 0xffff;
2663 if (is_iret) {
2664 POPL(ssp, sp, sp_mask, new_eflags);
2665 if (new_eflags & VM_MASK)
2666 goto return_to_vm86;
2668 } else {
2669 /* 16 bits */
2670 POPW(ssp, sp, sp_mask, new_eip);
2671 POPW(ssp, sp, sp_mask, new_cs);
2672 if (is_iret)
2673 POPW(ssp, sp, sp_mask, new_eflags);
2675 #ifdef DEBUG_PCALL
2676 if (loglevel & CPU_LOG_PCALL) {
2677 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2678 new_cs, new_eip, shift, addend);
2679 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2681 #endif
2682 if ((new_cs & 0xfffc) == 0)
2683 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2684 if (load_segment(&e1, &e2, new_cs) != 0)
2685 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2686 if (!(e2 & DESC_S_MASK) ||
2687 !(e2 & DESC_CS_MASK))
2688 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2689 cpl = env->hflags & HF_CPL_MASK;
2690 rpl = new_cs & 3;
2691 if (rpl < cpl)
2692 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2693 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2694 if (e2 & DESC_C_MASK) {
2695 if (dpl > rpl)
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 } else {
2698 if (dpl != rpl)
2699 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2701 if (!(e2 & DESC_P_MASK))
2702 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2704 sp += addend;
2705 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2706 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2707 /* return to same privilege level */
2708 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2709 get_seg_base(e1, e2),
2710 get_seg_limit(e1, e2),
2711 e2);
2712 } else {
2713 /* return to different privilege level */
2714 #ifdef TARGET_X86_64
2715 if (shift == 2) {
2716 POPQ(sp, new_esp);
2717 POPQ(sp, new_ss);
2718 new_ss &= 0xffff;
2719 } else
2720 #endif
2721 if (shift == 1) {
2722 /* 32 bits */
2723 POPL(ssp, sp, sp_mask, new_esp);
2724 POPL(ssp, sp, sp_mask, new_ss);
2725 new_ss &= 0xffff;
2726 } else {
2727 /* 16 bits */
2728 POPW(ssp, sp, sp_mask, new_esp);
2729 POPW(ssp, sp, sp_mask, new_ss);
2731 #ifdef DEBUG_PCALL
2732 if (loglevel & CPU_LOG_PCALL) {
2733 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2734 new_ss, new_esp);
2736 #endif
2737 if ((new_ss & 0xfffc) == 0) {
2738 #ifdef TARGET_X86_64
2739 /* NULL ss is allowed in long mode if cpl != 3 */
2740 /* XXX: test CS64 ? */
2741 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2742 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2743 0, 0xffffffff,
2744 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2745 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2746 DESC_W_MASK | DESC_A_MASK);
2747 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2748 } else
2749 #endif
2751 raise_exception_err(EXCP0D_GPF, 0);
2753 } else {
2754 if ((new_ss & 3) != rpl)
2755 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2756 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2757 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2758 if (!(ss_e2 & DESC_S_MASK) ||
2759 (ss_e2 & DESC_CS_MASK) ||
2760 !(ss_e2 & DESC_W_MASK))
2761 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2762 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2763 if (dpl != rpl)
2764 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2765 if (!(ss_e2 & DESC_P_MASK))
2766 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2767 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2768 get_seg_base(ss_e1, ss_e2),
2769 get_seg_limit(ss_e1, ss_e2),
2770 ss_e2);
2773 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2774 get_seg_base(e1, e2),
2775 get_seg_limit(e1, e2),
2776 e2);
2777 cpu_x86_set_cpl(env, rpl);
2778 sp = new_esp;
2779 #ifdef TARGET_X86_64
2780 if (env->hflags & HF_CS64_MASK)
2781 sp_mask = -1;
2782 else
2783 #endif
2784 sp_mask = get_sp_mask(ss_e2);
2786 /* validate data segments */
2787 validate_seg(R_ES, rpl);
2788 validate_seg(R_DS, rpl);
2789 validate_seg(R_FS, rpl);
2790 validate_seg(R_GS, rpl);
2792 sp += addend;
2794 SET_ESP(sp, sp_mask);
2795 env->eip = new_eip;
2796 if (is_iret) {
2797 /* NOTE: 'cpl' is the _old_ CPL */
2798 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2799 if (cpl == 0)
2800 eflags_mask |= IOPL_MASK;
2801 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2802 if (cpl <= iopl)
2803 eflags_mask |= IF_MASK;
2804 if (shift == 0)
2805 eflags_mask &= 0xffff;
2806 load_eflags(new_eflags, eflags_mask);
2808 return;
2810 return_to_vm86:
2811 POPL(ssp, sp, sp_mask, new_esp);
2812 POPL(ssp, sp, sp_mask, new_ss);
2813 POPL(ssp, sp, sp_mask, new_es);
2814 POPL(ssp, sp, sp_mask, new_ds);
2815 POPL(ssp, sp, sp_mask, new_fs);
2816 POPL(ssp, sp, sp_mask, new_gs);
2818 /* modify processor state */
2819 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2820 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2821 load_seg_vm(R_CS, new_cs & 0xffff);
2822 cpu_x86_set_cpl(env, 3);
2823 load_seg_vm(R_SS, new_ss & 0xffff);
2824 load_seg_vm(R_ES, new_es & 0xffff);
2825 load_seg_vm(R_DS, new_ds & 0xffff);
2826 load_seg_vm(R_FS, new_fs & 0xffff);
2827 load_seg_vm(R_GS, new_gs & 0xffff);
2829 env->eip = new_eip & 0xffff;
2830 ESP = new_esp;
2833 void helper_iret_protected(int shift, int next_eip)
2835 int tss_selector, type;
2836 uint32_t e1, e2;
2838 /* specific case for TSS */
2839 if (env->eflags & NT_MASK) {
2840 #ifdef TARGET_X86_64
2841 if (env->hflags & HF_LMA_MASK)
2842 raise_exception_err(EXCP0D_GPF, 0);
2843 #endif
2844 tss_selector = lduw_kernel(env->tr.base + 0);
2845 if (tss_selector & 4)
2846 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2847 if (load_segment(&e1, &e2, tss_selector) != 0)
2848 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2849 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2850 /* NOTE: we check both segment and busy TSS */
2851 if (type != 3)
2852 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2853 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2854 } else {
2855 helper_ret_protected(shift, 1, 0);
2857 env->hflags2 &= ~HF2_NMI_MASK;
2858 #ifdef USE_KQEMU
2859 if (kqemu_is_ok(env)) {
2860 CC_OP = CC_OP_EFLAGS;
2861 env->exception_index = -1;
2862 cpu_loop_exit();
2864 #endif
2867 void helper_lret_protected(int shift, int addend)
2869 helper_ret_protected(shift, 0, addend);
2870 #ifdef USE_KQEMU
2871 if (kqemu_is_ok(env)) {
2872 env->exception_index = -1;
2873 cpu_loop_exit();
2875 #endif
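/* SYSENTER/SYSEXIT use flat segments derived from MSR_IA32_SYSENTER_CS:
   +0/+8 give the kernel CS/SS and +16/+24 the user CS/SS.  SYSENTER enters
   CPL 0 at SYSENTER_EIP/ESP; SYSEXIT returns to CPL 3 at EDX/ECX. */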
2878 void helper_sysenter(void)
2880 if (env->sysenter_cs == 0) {
2881 raise_exception_err(EXCP0D_GPF, 0);
2883 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2884 cpu_x86_set_cpl(env, 0);
2885 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2886 0, 0xffffffff,
2887 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2888 DESC_S_MASK |
2889 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2890 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2891 0, 0xffffffff,
2892 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2893 DESC_S_MASK |
2894 DESC_W_MASK | DESC_A_MASK);
2895 ESP = env->sysenter_esp;
2896 EIP = env->sysenter_eip;
2899 void helper_sysexit(void)
2901 int cpl;
2903 cpl = env->hflags & HF_CPL_MASK;
2904 if (env->sysenter_cs == 0 || cpl != 0) {
2905 raise_exception_err(EXCP0D_GPF, 0);
2907 cpu_x86_set_cpl(env, 3);
2908 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2909 0, 0xffffffff,
2910 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2911 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2912 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2913 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2914 0, 0xffffffff,
2915 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2916 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2917 DESC_W_MASK | DESC_A_MASK);
2918 ESP = ECX;
2919 EIP = EDX;
2920 #ifdef USE_KQEMU
2921 if (kqemu_is_ok(env)) {
2922 env->exception_index = -1;
2923 cpu_loop_exit();
2925 #endif
2928 #if defined(CONFIG_USER_ONLY)
2929 target_ulong helper_read_crN(int reg)
2931 return 0;
2934 void helper_write_crN(int reg, target_ulong t0)
2937 #else
2938 target_ulong helper_read_crN(int reg)
2940 target_ulong val;
2942 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2943 switch(reg) {
2944 default:
2945 val = env->cr[reg];
2946 break;
2947 case 8:
2948 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2949 val = cpu_get_apic_tpr(env);
2950 } else {
2951 val = env->v_tpr;
2953 break;
2955 return val;
2958 void helper_write_crN(int reg, target_ulong t0)
2960 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2961 switch(reg) {
2962 case 0:
2963 cpu_x86_update_cr0(env, t0);
2964 break;
2965 case 3:
2966 cpu_x86_update_cr3(env, t0);
2967 break;
2968 case 4:
2969 cpu_x86_update_cr4(env, t0);
2970 break;
2971 case 8:
2972 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2973 cpu_set_apic_tpr(env, t0);
2975 env->v_tpr = t0 & 0x0f;
2976 break;
2977 default:
2978 env->cr[reg] = t0;
2979 break;
2982 #endif
2984 void helper_lmsw(target_ulong t0)
2986 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2987 if already set to one. */
2988 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2989 helper_write_crN(0, t0);
2992 void helper_clts(void)
2994 env->cr[0] &= ~CR0_TS_MASK;
2995 env->hflags &= ~HF_TS_MASK;
2998 /* XXX: do more */
2999 void helper_movl_drN_T0(int reg, target_ulong t0)
3001 env->dr[reg] = t0;
3004 void helper_invlpg(target_ulong addr)
3006 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3007 tlb_flush_page(env, addr);
3010 void helper_rdtsc(void)
3012 uint64_t val;
3014 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3015 raise_exception(EXCP0D_GPF);
3017 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3019 val = cpu_get_tsc(env) + env->tsc_offset;
3020 EAX = (uint32_t)(val);
3021 EDX = (uint32_t)(val >> 32);
3024 void helper_rdpmc(void)
3026 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3027 raise_exception(EXCP0D_GPF);
3029 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3031 /* currently unimplemented */
3032 raise_exception_err(EXCP06_ILLOP, 0);
3035 #if defined(CONFIG_USER_ONLY)
3036 void helper_wrmsr(void)
3040 void helper_rdmsr(void)
3043 #else
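/* WRMSR/RDMSR take the MSR index in ECX and the 64-bit value in EDX:EAX.
   Writes to EFER are masked so that only bits backed by the advertised
   CPUID features (SCE, LME, FFXSR, NXE, SVME) can change. */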
3044 void helper_wrmsr(void)
3046 uint64_t val;
3048 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3050 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3052 switch((uint32_t)ECX) {
3053 case MSR_IA32_SYSENTER_CS:
3054 env->sysenter_cs = val & 0xffff;
3055 break;
3056 case MSR_IA32_SYSENTER_ESP:
3057 env->sysenter_esp = val;
3058 break;
3059 case MSR_IA32_SYSENTER_EIP:
3060 env->sysenter_eip = val;
3061 break;
3062 case MSR_IA32_APICBASE:
3063 cpu_set_apic_base(env, val);
3064 break;
3065 case MSR_EFER:
3067 uint64_t update_mask;
3068 update_mask = 0;
3069 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3070 update_mask |= MSR_EFER_SCE;
3071 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3072 update_mask |= MSR_EFER_LME;
3073 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3074 update_mask |= MSR_EFER_FFXSR;
3075 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3076 update_mask |= MSR_EFER_NXE;
3077 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3078 update_mask |= MSR_EFER_SVME;
3079 cpu_load_efer(env, (env->efer & ~update_mask) |
3080 (val & update_mask));
3082 break;
3083 case MSR_STAR:
3084 env->star = val;
3085 break;
3086 case MSR_PAT:
3087 env->pat = val;
3088 break;
3089 case MSR_VM_HSAVE_PA:
3090 env->vm_hsave = val;
3091 break;
3092 #ifdef TARGET_X86_64
3093 case MSR_LSTAR:
3094 env->lstar = val;
3095 break;
3096 case MSR_CSTAR:
3097 env->cstar = val;
3098 break;
3099 case MSR_FMASK:
3100 env->fmask = val;
3101 break;
3102 case MSR_FSBASE:
3103 env->segs[R_FS].base = val;
3104 break;
3105 case MSR_GSBASE:
3106 env->segs[R_GS].base = val;
3107 break;
3108 case MSR_KERNELGSBASE:
3109 env->kernelgsbase = val;
3110 break;
3111 #endif
3112 default:
3113 /* XXX: exception ? */
3114 break;
3118 void helper_rdmsr(void)
3120 uint64_t val;
3122 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3124 switch((uint32_t)ECX) {
3125 case MSR_IA32_SYSENTER_CS:
3126 val = env->sysenter_cs;
3127 break;
3128 case MSR_IA32_SYSENTER_ESP:
3129 val = env->sysenter_esp;
3130 break;
3131 case MSR_IA32_SYSENTER_EIP:
3132 val = env->sysenter_eip;
3133 break;
3134 case MSR_IA32_APICBASE:
3135 val = cpu_get_apic_base(env);
3136 break;
3137 case MSR_EFER:
3138 val = env->efer;
3139 break;
3140 case MSR_STAR:
3141 val = env->star;
3142 break;
3143 case MSR_PAT:
3144 val = env->pat;
3145 break;
3146 case MSR_VM_HSAVE_PA:
3147 val = env->vm_hsave;
3148 break;
3149 #ifdef TARGET_X86_64
3150 case MSR_LSTAR:
3151 val = env->lstar;
3152 break;
3153 case MSR_CSTAR:
3154 val = env->cstar;
3155 break;
3156 case MSR_FMASK:
3157 val = env->fmask;
3158 break;
3159 case MSR_FSBASE:
3160 val = env->segs[R_FS].base;
3161 break;
3162 case MSR_GSBASE:
3163 val = env->segs[R_GS].base;
3164 break;
3165 case MSR_KERNELGSBASE:
3166 val = env->kernelgsbase;
3167 break;
3168 #endif
3169 #ifdef USE_KQEMU
3170 case MSR_QPI_COMMBASE:
3171 if (env->kqemu_enabled) {
3172 val = kqemu_comm_base;
3173 } else {
3174 val = 0;
3176 break;
3177 #endif
3178 default:
3179 /* XXX: exception ? */
3180 val = 0;
3181 break;
3183 EAX = (uint32_t)(val);
3184 EDX = (uint32_t)(val >> 32);
3186 #endif
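/* LSL/LAR return the segment limit / access rights of a selector and set
   ZF on success; any descriptor or privilege check failure clears ZF
   without raising an exception. */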
3188 target_ulong helper_lsl(target_ulong selector1)
3190 unsigned int limit;
3191 uint32_t e1, e2, eflags, selector;
3192 int rpl, dpl, cpl, type;
3194 selector = selector1 & 0xffff;
3195 eflags = cc_table[CC_OP].compute_all();
3196 if (load_segment(&e1, &e2, selector) != 0)
3197 goto fail;
3198 rpl = selector & 3;
3199 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3200 cpl = env->hflags & HF_CPL_MASK;
3201 if (e2 & DESC_S_MASK) {
3202 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3203 /* conforming */
3204 } else {
3205 if (dpl < cpl || dpl < rpl)
3206 goto fail;
3208 } else {
3209 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3210 switch(type) {
3211 case 1:
3212 case 2:
3213 case 3:
3214 case 9:
3215 case 11:
3216 break;
3217 default:
3218 goto fail;
3220 if (dpl < cpl || dpl < rpl) {
3221 fail:
3222 CC_SRC = eflags & ~CC_Z;
3223 return 0;
3226 limit = get_seg_limit(e1, e2);
3227 CC_SRC = eflags | CC_Z;
3228 return limit;
3231 target_ulong helper_lar(target_ulong selector1)
3233 uint32_t e1, e2, eflags, selector;
3234 int rpl, dpl, cpl, type;
3236 selector = selector1 & 0xffff;
3237 eflags = cc_table[CC_OP].compute_all();
3238 if ((selector & 0xfffc) == 0)
3239 goto fail;
3240 if (load_segment(&e1, &e2, selector) != 0)
3241 goto fail;
3242 rpl = selector & 3;
3243 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3244 cpl = env->hflags & HF_CPL_MASK;
3245 if (e2 & DESC_S_MASK) {
3246 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3247 /* conforming */
3248 } else {
3249 if (dpl < cpl || dpl < rpl)
3250 goto fail;
3252 } else {
3253 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3254 switch(type) {
3255 case 1:
3256 case 2:
3257 case 3:
3258 case 4:
3259 case 5:
3260 case 9:
3261 case 11:
3262 case 12:
3263 break;
3264 default:
3265 goto fail;
3267 if (dpl < cpl || dpl < rpl) {
3268 fail:
3269 CC_SRC = eflags & ~CC_Z;
3270 return 0;
3273 CC_SRC = eflags | CC_Z;
3274 return e2 & 0x00f0ff00;
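/* VERR/VERW set ZF if the selector is readable/writable at the current
   CPL and RPL, and clear ZF otherwise without faulting. */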
3277 void helper_verr(target_ulong selector1)
3279 uint32_t e1, e2, eflags, selector;
3280 int rpl, dpl, cpl;
3282 selector = selector1 & 0xffff;
3283 eflags = cc_table[CC_OP].compute_all();
3284 if ((selector & 0xfffc) == 0)
3285 goto fail;
3286 if (load_segment(&e1, &e2, selector) != 0)
3287 goto fail;
3288 if (!(e2 & DESC_S_MASK))
3289 goto fail;
3290 rpl = selector & 3;
3291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3292 cpl = env->hflags & HF_CPL_MASK;
3293 if (e2 & DESC_CS_MASK) {
3294 if (!(e2 & DESC_R_MASK))
3295 goto fail;
3296 if (!(e2 & DESC_C_MASK)) {
3297 if (dpl < cpl || dpl < rpl)
3298 goto fail;
3300 } else {
3301 if (dpl < cpl || dpl < rpl) {
3302 fail:
3303 CC_SRC = eflags & ~CC_Z;
3304 return;
3307 CC_SRC = eflags | CC_Z;
3310 void helper_verw(target_ulong selector1)
3312 uint32_t e1, e2, eflags, selector;
3313 int rpl, dpl, cpl;
3315 selector = selector1 & 0xffff;
3316 eflags = cc_table[CC_OP].compute_all();
3317 if ((selector & 0xfffc) == 0)
3318 goto fail;
3319 if (load_segment(&e1, &e2, selector) != 0)
3320 goto fail;
3321 if (!(e2 & DESC_S_MASK))
3322 goto fail;
3323 rpl = selector & 3;
3324 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3325 cpl = env->hflags & HF_CPL_MASK;
3326 if (e2 & DESC_CS_MASK) {
3327 goto fail;
3328 } else {
3329 if (dpl < cpl || dpl < rpl)
3330 goto fail;
3331 if (!(e2 & DESC_W_MASK)) {
3332 fail:
3333 CC_SRC = eflags & ~CC_Z;
3334 return;
3337 CC_SRC = eflags | CC_Z;
3340 /* x87 FPU helpers */
3342 static void fpu_set_exception(int mask)
3344 env->fpus |= mask;
3345 if (env->fpus & (~env->fpuc & FPUC_EM))
3346 env->fpus |= FPUS_SE | FPUS_B;
3349 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3351 if (b == 0.0)
3352 fpu_set_exception(FPUS_ZE);
3353 return a / b;
3356 void fpu_raise_exception(void)
3358 if (env->cr[0] & CR0_NE_MASK) {
3359 raise_exception(EXCP10_COPR);
3361 #if !defined(CONFIG_USER_ONLY)
3362 else {
3363 cpu_set_ferr(env);
3365 #endif
3368 void helper_flds_FT0(uint32_t val)
3370 union {
3371 float32 f;
3372 uint32_t i;
3373 } u;
3374 u.i = val;
3375 FT0 = float32_to_floatx(u.f, &env->fp_status);
3378 void helper_fldl_FT0(uint64_t val)
3380 union {
3381 float64 f;
3382 uint64_t i;
3383 } u;
3384 u.i = val;
3385 FT0 = float64_to_floatx(u.f, &env->fp_status);
3388 void helper_fildl_FT0(int32_t val)
3390 FT0 = int32_to_floatx(val, &env->fp_status);
3393 void helper_flds_ST0(uint32_t val)
3395 int new_fpstt;
3396 union {
3397 float32 f;
3398 uint32_t i;
3399 } u;
3400 new_fpstt = (env->fpstt - 1) & 7;
3401 u.i = val;
3402 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3403 env->fpstt = new_fpstt;
3404 env->fptags[new_fpstt] = 0; /* validate stack entry */
3407 void helper_fldl_ST0(uint64_t val)
3409 int new_fpstt;
3410 union {
3411 float64 f;
3412 uint64_t i;
3413 } u;
3414 new_fpstt = (env->fpstt - 1) & 7;
3415 u.i = val;
3416 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3417 env->fpstt = new_fpstt;
3418 env->fptags[new_fpstt] = 0; /* validate stack entry */
3421 void helper_fildl_ST0(int32_t val)
3423 int new_fpstt;
3424 new_fpstt = (env->fpstt - 1) & 7;
3425 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3426 env->fpstt = new_fpstt;
3427 env->fptags[new_fpstt] = 0; /* validate stack entry */
3430 void helper_fildll_ST0(int64_t val)
3432 int new_fpstt;
3433 new_fpstt = (env->fpstt - 1) & 7;
3434 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3435 env->fpstt = new_fpstt;
3436 env->fptags[new_fpstt] = 0; /* validate stack entry */
3439 uint32_t helper_fsts_ST0(void)
3441 union {
3442 float32 f;
3443 uint32_t i;
3444 } u;
3445 u.f = floatx_to_float32(ST0, &env->fp_status);
3446 return u.i;
3449 uint64_t helper_fstl_ST0(void)
3451 union {
3452 float64 f;
3453 uint64_t i;
3454 } u;
3455 u.f = floatx_to_float64(ST0, &env->fp_status);
3456 return u.i;
3459 int32_t helper_fist_ST0(void)
3461 int32_t val;
3462 val = floatx_to_int32(ST0, &env->fp_status);
3463 if (val != (int16_t)val)
3464 val = -32768;
3465 return val;
3468 int32_t helper_fistl_ST0(void)
3470 int32_t val;
3471 val = floatx_to_int32(ST0, &env->fp_status);
3472 return val;
3475 int64_t helper_fistll_ST0(void)
3477 int64_t val;
3478 val = floatx_to_int64(ST0, &env->fp_status);
3479 return val;
3482 int32_t helper_fistt_ST0(void)
3484 int32_t val;
3485 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3486 if (val != (int16_t)val)
3487 val = -32768;
3488 return val;
3491 int32_t helper_fisttl_ST0(void)
3493 int32_t val;
3494 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3495 return val;
3498 int64_t helper_fisttll_ST0(void)
3500 int64_t val;
3501 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3502 return val;
3505 void helper_fldt_ST0(target_ulong ptr)
3507 int new_fpstt;
3508 new_fpstt = (env->fpstt - 1) & 7;
3509 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3510 env->fpstt = new_fpstt;
3511 env->fptags[new_fpstt] = 0; /* validate stack entry */
3514 void helper_fstt_ST0(target_ulong ptr)
3516 helper_fstt(ST0, ptr);
3519 void helper_fpush(void)
3521 fpush();
3524 void helper_fpop(void)
3526 fpop();
3529 void helper_fdecstp(void)
3531 env->fpstt = (env->fpstt - 1) & 7;
3532 env->fpus &= (~0x4700);
3535 void helper_fincstp(void)
3537 env->fpstt = (env->fpstt + 1) & 7;
3538 env->fpus &= (~0x4700);
3541 /* FPU move */
3543 void helper_ffree_STN(int st_index)
3545 env->fptags[(env->fpstt + st_index) & 7] = 1;
3548 void helper_fmov_ST0_FT0(void)
3550 ST0 = FT0;
3553 void helper_fmov_FT0_STN(int st_index)
3555 FT0 = ST(st_index);
3558 void helper_fmov_ST0_STN(int st_index)
3560 ST0 = ST(st_index);
3563 void helper_fmov_STN_ST0(int st_index)
3565 ST(st_index) = ST0;
3568 void helper_fxchg_ST0_STN(int st_index)
3570 CPU86_LDouble tmp;
3571 tmp = ST(st_index);
3572 ST(st_index) = ST0;
3573 ST0 = tmp;
3576 /* FPU operations */
3578 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3580 void helper_fcom_ST0_FT0(void)
3582 int ret;
3584 ret = floatx_compare(ST0, FT0, &env->fp_status);
3585 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3586 FORCE_RET();
3589 void helper_fucom_ST0_FT0(void)
3591 int ret;
3593 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3594 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3595 FORCE_RET();
3598 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3600 void helper_fcomi_ST0_FT0(void)
3602 int eflags;
3603 int ret;
3605 ret = floatx_compare(ST0, FT0, &env->fp_status);
3606 eflags = cc_table[CC_OP].compute_all();
3607 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3608 CC_SRC = eflags;
3609 FORCE_RET();
3612 void helper_fucomi_ST0_FT0(void)
3614 int eflags;
3615 int ret;
3617 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3618 eflags = cc_table[CC_OP].compute_all();
3619 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3620 CC_SRC = eflags;
3621 FORCE_RET();
3624 void helper_fadd_ST0_FT0(void)
3626 ST0 += FT0;
3629 void helper_fmul_ST0_FT0(void)
3631 ST0 *= FT0;
3634 void helper_fsub_ST0_FT0(void)
3636 ST0 -= FT0;
3639 void helper_fsubr_ST0_FT0(void)
3641 ST0 = FT0 - ST0;
3644 void helper_fdiv_ST0_FT0(void)
3646 ST0 = helper_fdiv(ST0, FT0);
3649 void helper_fdivr_ST0_FT0(void)
3651 ST0 = helper_fdiv(FT0, ST0);
3654 /* fp operations between STN and ST0 */
3656 void helper_fadd_STN_ST0(int st_index)
3658 ST(st_index) += ST0;
3661 void helper_fmul_STN_ST0(int st_index)
3663 ST(st_index) *= ST0;
3666 void helper_fsub_STN_ST0(int st_index)
3668 ST(st_index) -= ST0;
3671 void helper_fsubr_STN_ST0(int st_index)
3673 CPU86_LDouble *p;
3674 p = &ST(st_index);
3675 *p = ST0 - *p;
3678 void helper_fdiv_STN_ST0(int st_index)
3680 CPU86_LDouble *p;
3681 p = &ST(st_index);
3682 *p = helper_fdiv(*p, ST0);
3685 void helper_fdivr_STN_ST0(int st_index)
3687 CPU86_LDouble *p;
3688 p = &ST(st_index);
3689 *p = helper_fdiv(ST0, *p);
3692 /* misc FPU operations */
3693 void helper_fchs_ST0(void)
3695 ST0 = floatx_chs(ST0);
3698 void helper_fabs_ST0(void)
3700 ST0 = floatx_abs(ST0);
3703 void helper_fld1_ST0(void)
3705 ST0 = f15rk[1];
3708 void helper_fldl2t_ST0(void)
3710 ST0 = f15rk[6];
3713 void helper_fldl2e_ST0(void)
3715 ST0 = f15rk[5];
3718 void helper_fldpi_ST0(void)
3720 ST0 = f15rk[2];
3723 void helper_fldlg2_ST0(void)
3725 ST0 = f15rk[3];
3728 void helper_fldln2_ST0(void)
3730 ST0 = f15rk[4];
3733 void helper_fldz_ST0(void)
3735 ST0 = f15rk[0];
3738 void helper_fldz_FT0(void)
3740 FT0 = f15rk[0];
3743 uint32_t helper_fnstsw(void)
3745 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3748 uint32_t helper_fnstcw(void)
3750 return env->fpuc;
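/* Propagate the x87 control word into the softfloat status: the RC field
   selects the rounding mode and, when 80-bit doubles are used, the PC
   field selects the rounding precision (32/64/80 bits). */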
3753 static void update_fp_status(void)
3755 int rnd_type;
3757 /* set rounding mode */
3758 switch(env->fpuc & RC_MASK) {
3759 default:
3760 case RC_NEAR:
3761 rnd_type = float_round_nearest_even;
3762 break;
3763 case RC_DOWN:
3764 rnd_type = float_round_down;
3765 break;
3766 case RC_UP:
3767 rnd_type = float_round_up;
3768 break;
3769 case RC_CHOP:
3770 rnd_type = float_round_to_zero;
3771 break;
3773 set_float_rounding_mode(rnd_type, &env->fp_status);
3774 #ifdef FLOATX80
3775 switch((env->fpuc >> 8) & 3) {
3776 case 0:
3777 rnd_type = 32;
3778 break;
3779 case 2:
3780 rnd_type = 64;
3781 break;
3782 case 3:
3783 default:
3784 rnd_type = 80;
3785 break;
3787 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3788 #endif
3791 void helper_fldcw(uint32_t val)
3793 env->fpuc = val;
3794 update_fp_status();
3797 void helper_fclex(void)
3799 env->fpus &= 0x7f00;
3802 void helper_fwait(void)
3804 if (env->fpus & FPUS_SE)
3805 fpu_raise_exception();
3806 FORCE_RET();
3809 void helper_fninit(void)
3811 env->fpus = 0;
3812 env->fpstt = 0;
3813 env->fpuc = 0x37f;
3814 env->fptags[0] = 1;
3815 env->fptags[1] = 1;
3816 env->fptags[2] = 1;
3817 env->fptags[3] = 1;
3818 env->fptags[4] = 1;
3819 env->fptags[5] = 1;
3820 env->fptags[6] = 1;
3821 env->fptags[7] = 1;
3824 /* BCD ops */
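/* FBLD/FBST operate on a ten-byte packed-BCD operand: 18 decimal digits in
   bytes 0-8 (two digits per byte, low digit in the low nibble) and the sign
   in bit 7 of byte 9. */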
3826 void helper_fbld_ST0(target_ulong ptr)
3828 CPU86_LDouble tmp;
3829 uint64_t val;
3830 unsigned int v;
3831 int i;
3833 val = 0;
3834 for(i = 8; i >= 0; i--) {
3835 v = ldub(ptr + i);
3836 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3838 tmp = val;
3839 if (ldub(ptr + 9) & 0x80)
3840 tmp = -tmp;
3841 fpush();
3842 ST0 = tmp;
3845 void helper_fbst_ST0(target_ulong ptr)
3847 int v;
3848 target_ulong mem_ref, mem_end;
3849 int64_t val;
3851 val = floatx_to_int64(ST0, &env->fp_status);
3852 mem_ref = ptr;
3853 mem_end = mem_ref + 9;
3854 if (val < 0) {
3855 stb(mem_end, 0x80);
3856 val = -val;
3857 } else {
3858 stb(mem_end, 0x00);
3860 while (mem_ref < mem_end) {
3861 if (val == 0)
3862 break;
3863 v = val % 100;
3864 val = val / 100;
3865 v = ((v / 10) << 4) | (v % 10);
3866 stb(mem_ref++, v);
3868 while (mem_ref < mem_end) {
3869 stb(mem_ref++, 0);
3873 void helper_f2xm1(void)
3875 ST0 = pow(2.0,ST0) - 1.0;
3878 void helper_fyl2x(void)
3880 CPU86_LDouble fptemp;
3882 fptemp = ST0;
3883 if (fptemp > 0.0) {
3884 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3885 ST1 *= fptemp;
3886 fpop();
3887 } else {
3888 env->fpus &= (~0x4700);
3889 env->fpus |= 0x400;
3893 void helper_fptan(void)
3895 CPU86_LDouble fptemp;
3897 fptemp = ST0;
3898 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3899 env->fpus |= 0x400;
3900 } else {
3901 ST0 = tan(fptemp);
3902 fpush();
3903 ST0 = 1.0;
3904 env->fpus &= (~0x400); /* C2 <-- 0 */
3905 /* the above code is for |arg| < 2**52 only */
3909 void helper_fpatan(void)
3911 CPU86_LDouble fptemp, fpsrcop;
3913 fpsrcop = ST1;
3914 fptemp = ST0;
3915 ST1 = atan2(fpsrcop,fptemp);
3916 fpop();
3919 void helper_fxtract(void)
3921 CPU86_LDoubleU temp;
3922 unsigned int expdif;
3924 temp.d = ST0;
3925 expdif = EXPD(temp) - EXPBIAS;
3926 /* DP exponent bias */
3927 ST0 = expdif;
3928 fpush();
3929 BIASEXPONENT(temp);
3930 ST0 = temp.d;
3933 void helper_fprem1(void)
3935 CPU86_LDouble dblq, fpsrcop, fptemp;
3936 CPU86_LDoubleU fpsrcop1, fptemp1;
3937 int expdif;
3938 signed long long int q;
3940 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3941 ST0 = 0.0 / 0.0; /* NaN */
3942 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3943 return;
3946 fpsrcop = ST0;
3947 fptemp = ST1;
3948 fpsrcop1.d = fpsrcop;
3949 fptemp1.d = fptemp;
3950 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3952 if (expdif < 0) {
3953 /* optimisation? taken from the AMD docs */
3954 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3955 /* ST0 is unchanged */
3956 return;
3959 if (expdif < 53) {
3960 dblq = fpsrcop / fptemp;
3961 /* round dblq towards nearest integer */
3962 dblq = rint(dblq);
3963 ST0 = fpsrcop - fptemp * dblq;
3965 /* convert dblq to q by truncating towards zero */
3966 if (dblq < 0.0)
3967 q = (signed long long int)(-dblq);
3968 else
3969 q = (signed long long int)dblq;
3971 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3972 /* (C0,C3,C1) <-- (q2,q1,q0) */
3973 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3974 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3975 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3976 } else {
3977 env->fpus |= 0x400; /* C2 <-- 1 */
3978 fptemp = pow(2.0, expdif - 50);
3979 fpsrcop = (ST0 / ST1) / fptemp;
3980 /* fpsrcop = integer obtained by chopping */
3981 fpsrcop = (fpsrcop < 0.0) ?
3982 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3983 ST0 -= (ST1 * fpsrcop * fptemp);
3987 void helper_fprem(void)
3989 CPU86_LDouble dblq, fpsrcop, fptemp;
3990 CPU86_LDoubleU fpsrcop1, fptemp1;
3991 int expdif;
3992 signed long long int q;
3994 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3995 ST0 = 0.0 / 0.0; /* NaN */
3996 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3997 return;
4000 fpsrcop = (CPU86_LDouble)ST0;
4001 fptemp = (CPU86_LDouble)ST1;
4002 fpsrcop1.d = fpsrcop;
4003 fptemp1.d = fptemp;
4004 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4006 if (expdif < 0) {
4007 /* optimisation? taken from the AMD docs */
4008 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4009 /* ST0 is unchanged */
4010 return;
4013 if ( expdif < 53 ) {
4014 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4015 /* round dblq towards zero */
4016 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4017 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4019 /* convert dblq to q by truncating towards zero */
4020 if (dblq < 0.0)
4021 q = (signed long long int)(-dblq);
4022 else
4023 q = (signed long long int)dblq;
4025 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4026 /* (C0,C3,C1) <-- (q2,q1,q0) */
4027 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4028 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4029 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4030 } else {
4031 int N = 32 + (expdif % 32); /* as per AMD docs */
4032 env->fpus |= 0x400; /* C2 <-- 1 */
4033 fptemp = pow(2.0, (double)(expdif - N));
4034 fpsrcop = (ST0 / ST1) / fptemp;
4035 /* fpsrcop = integer obtained by chopping */
4036 fpsrcop = (fpsrcop < 0.0) ?
4037 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4038 ST0 -= (ST1 * fpsrcop * fptemp);
4042 void helper_fyl2xp1(void)
4044 CPU86_LDouble fptemp;
4046 fptemp = ST0;
4047 if ((fptemp + 1.0) > 0.0) {
4048 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4049 ST1 *= fptemp;
4050 fpop();
4051 } else {
4052 env->fpus &= (~0x4700);
4053 env->fpus |= 0x400;
4057 void helper_fsqrt(void)
4059 CPU86_LDouble fptemp;
4061 fptemp = ST0;
4062 if (fptemp < 0.0) {
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 env->fpus |= 0x400;
4066 ST0 = sqrt(fptemp);
4069 void helper_fsincos(void)
4071 CPU86_LDouble fptemp;
4073 fptemp = ST0;
4074 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4075 env->fpus |= 0x400;
4076 } else {
4077 ST0 = sin(fptemp);
4078 fpush();
4079 ST0 = cos(fptemp);
4080 env->fpus &= (~0x400); /* C2 <-- 0 */
4081 /* the above code is for |arg| < 2**63 only */
4085 void helper_frndint(void)
4087 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4090 void helper_fscale(void)
4092 ST0 = ldexp (ST0, (int)(ST1));
4095 void helper_fsin(void)
4097 CPU86_LDouble fptemp;
4099 fptemp = ST0;
4100 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4101 env->fpus |= 0x400;
4102 } else {
4103 ST0 = sin(fptemp);
4104 env->fpus &= (~0x400); /* C2 <-- 0 */
4105 /* the above code is for |arg| < 2**53 only */
4109 void helper_fcos(void)
4111 CPU86_LDouble fptemp;
4113 fptemp = ST0;
4114 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4115 env->fpus |= 0x400;
4116 } else {
4117 ST0 = cos(fptemp);
4118 env->fpus &= (~0x400); /* C2 <-- 0 */
4119 /* the above code is for |arg| < 2**63 only */
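/* FXAM classifies ST0 into the C3..C0 condition codes (zero, NaN, infinity,
   denormal or normal) and copies its sign into C1. */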
4123 void helper_fxam_ST0(void)
4125 CPU86_LDoubleU temp;
4126 int expdif;
4128 temp.d = ST0;
4130 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4131 if (SIGND(temp))
4132 env->fpus |= 0x200; /* C1 <-- 1 */
4134 /* XXX: test fptags too */
4135 expdif = EXPD(temp);
4136 if (expdif == MAXEXPD) {
4137 #ifdef USE_X86LDOUBLE
4138 if (MANTD(temp) == 0x8000000000000000ULL)
4139 #else
4140 if (MANTD(temp) == 0)
4141 #endif
4142 env->fpus |= 0x500 /*Infinity*/;
4143 else
4144 env->fpus |= 0x100 /*NaN*/;
4145 } else if (expdif == 0) {
4146 if (MANTD(temp) == 0)
4147 env->fpus |= 0x4000 /*Zero*/;
4148 else
4149 env->fpus |= 0x4400 /*Denormal*/;
4150 } else {
4151 env->fpus |= 0x400;
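/* FSTENV stores the 14-byte (16-bit) or 28-byte (32-bit) FPU environment:
   control, status and tag words plus the instruction and operand pointers,
   which this implementation leaves as zero. */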
4155 void helper_fstenv(target_ulong ptr, int data32)
4157 int fpus, fptag, exp, i;
4158 uint64_t mant;
4159 CPU86_LDoubleU tmp;
4161 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4162 fptag = 0;
4163 for (i=7; i>=0; i--) {
4164 fptag <<= 2;
4165 if (env->fptags[i]) {
4166 fptag |= 3;
4167 } else {
4168 tmp.d = env->fpregs[i].d;
4169 exp = EXPD(tmp);
4170 mant = MANTD(tmp);
4171 if (exp == 0 && mant == 0) {
4172 /* zero */
4173 fptag |= 1;
4174 } else if (exp == 0 || exp == MAXEXPD
4175 #ifdef USE_X86LDOUBLE
4176 || (mant & (1LL << 63)) == 0
4177 #endif
4179 /* NaNs, infinity, denormal */
4180 fptag |= 2;
4184 if (data32) {
4185 /* 32 bit */
4186 stl(ptr, env->fpuc);
4187 stl(ptr + 4, fpus);
4188 stl(ptr + 8, fptag);
4189 stl(ptr + 12, 0); /* fpip */
4190 stl(ptr + 16, 0); /* fpcs */
4191 stl(ptr + 20, 0); /* fpoo */
4192 stl(ptr + 24, 0); /* fpos */
4193 } else {
4194 /* 16 bit */
4195 stw(ptr, env->fpuc);
4196 stw(ptr + 2, fpus);
4197 stw(ptr + 4, fptag);
4198 stw(ptr + 6, 0);
4199 stw(ptr + 8, 0);
4200 stw(ptr + 10, 0);
4201 stw(ptr + 12, 0);
4205 void helper_fldenv(target_ulong ptr, int data32)
4207 int i, fpus, fptag;
4209 if (data32) {
4210 env->fpuc = lduw(ptr);
4211 fpus = lduw(ptr + 4);
4212 fptag = lduw(ptr + 8);
4214 else {
4215 env->fpuc = lduw(ptr);
4216 fpus = lduw(ptr + 2);
4217 fptag = lduw(ptr + 4);
4219 env->fpstt = (fpus >> 11) & 7;
4220 env->fpus = fpus & ~0x3800;
4221 for(i = 0;i < 8; i++) {
4222 env->fptags[i] = ((fptag & 3) == 3);
4223 fptag >>= 2;
4227 void helper_fsave(target_ulong ptr, int data32)
4229 CPU86_LDouble tmp;
4230 int i;
4232 helper_fstenv(ptr, data32);
4234 ptr += (14 << data32);
4235 for(i = 0;i < 8; i++) {
4236 tmp = ST(i);
4237 helper_fstt(tmp, ptr);
4238 ptr += 10;
4241 /* fninit */
4242 env->fpus = 0;
4243 env->fpstt = 0;
4244 env->fpuc = 0x37f;
4245 env->fptags[0] = 1;
4246 env->fptags[1] = 1;
4247 env->fptags[2] = 1;
4248 env->fptags[3] = 1;
4249 env->fptags[4] = 1;
4250 env->fptags[5] = 1;
4251 env->fptags[6] = 1;
4252 env->fptags[7] = 1;
4255 void helper_frstor(target_ulong ptr, int data32)
4257 CPU86_LDouble tmp;
4258 int i;
4260 helper_fldenv(ptr, data32);
4261 ptr += (14 << data32);
4263 for(i = 0;i < 8; i++) {
4264 tmp = helper_fldt(ptr);
4265 ST(i) = tmp;
4266 ptr += 10;
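/* FXSAVE/FXRSTOR use the 512-byte layout: FCW at 0, FSW at 2, the compacted
   tag byte at 4, MXCSR at 0x18, the eight x87 registers at 0x20 (16 bytes
   apart) and the XMM registers at 0xa0 when CR4.OSFXSR is set. */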
4270 void helper_fxsave(target_ulong ptr, int data64)
4272 int fpus, fptag, i, nb_xmm_regs;
4273 CPU86_LDouble tmp;
4274 target_ulong addr;
4276 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4277 fptag = 0;
4278 for(i = 0; i < 8; i++) {
4279 fptag |= (env->fptags[i] << i);
4281 stw(ptr, env->fpuc);
4282 stw(ptr + 2, fpus);
4283 stw(ptr + 4, fptag ^ 0xff);
4284 #ifdef TARGET_X86_64
4285 if (data64) {
4286 stq(ptr + 0x08, 0); /* rip */
4287 stq(ptr + 0x10, 0); /* rdp */
4288 } else
4289 #endif
4291 stl(ptr + 0x08, 0); /* eip */
4292 stl(ptr + 0x0c, 0); /* sel */
4293 stl(ptr + 0x10, 0); /* dp */
4294 stl(ptr + 0x14, 0); /* sel */
4297 addr = ptr + 0x20;
4298 for(i = 0;i < 8; i++) {
4299 tmp = ST(i);
4300 helper_fstt(tmp, addr);
4301 addr += 16;
4304 if (env->cr[4] & CR4_OSFXSR_MASK) {
4305 /* XXX: finish it */
4306 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4307 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4308 if (env->hflags & HF_CS64_MASK)
4309 nb_xmm_regs = 16;
4310 else
4311 nb_xmm_regs = 8;
4312 addr = ptr + 0xa0;
4313 for(i = 0; i < nb_xmm_regs; i++) {
4314 stq(addr, env->xmm_regs[i].XMM_Q(0));
4315 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4316 addr += 16;
4321 void helper_fxrstor(target_ulong ptr, int data64)
4323 int i, fpus, fptag, nb_xmm_regs;
4324 CPU86_LDouble tmp;
4325 target_ulong addr;
4327 env->fpuc = lduw(ptr);
4328 fpus = lduw(ptr + 2);
4329 fptag = lduw(ptr + 4);
4330 env->fpstt = (fpus >> 11) & 7;
4331 env->fpus = fpus & ~0x3800;
4332 fptag ^= 0xff;
4333 for(i = 0;i < 8; i++) {
4334 env->fptags[i] = ((fptag >> i) & 1);
4337 addr = ptr + 0x20;
4338 for(i = 0;i < 8; i++) {
4339 tmp = helper_fldt(addr);
4340 ST(i) = tmp;
4341 addr += 16;
4344 if (env->cr[4] & CR4_OSFXSR_MASK) {
4345 /* XXX: finish it */
4346 env->mxcsr = ldl(ptr + 0x18);
4347 //ldl(ptr + 0x1c);
4348 if (env->hflags & HF_CS64_MASK)
4349 nb_xmm_regs = 16;
4350 else
4351 nb_xmm_regs = 8;
4352 addr = ptr + 0xa0;
4353 for(i = 0; i < nb_xmm_regs; i++) {
4354 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4355 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4356 addr += 16;
4361 #ifndef USE_X86LDOUBLE
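/* Without USE_X86LDOUBLE the guest's 80-bit values are held as IEEE
   doubles, so these conversions rebuild or consume the extended format:
   explicit integer bit, 64-bit mantissa, 15-bit exponent biased by 16383. */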
4363 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4365 CPU86_LDoubleU temp;
4366 int e;
4368 temp.d = f;
4369 /* mantissa */
4370 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4371 /* exponent + sign */
4372 e = EXPD(temp) - EXPBIAS + 16383;
4373 e |= SIGND(temp) >> 16;
4374 *pexp = e;
4377 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4379 CPU86_LDoubleU temp;
4380 int e;
4381 uint64_t ll;
4383 /* XXX: handle overflow ? */
4384 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4385 e |= (upper >> 4) & 0x800; /* sign */
4386 ll = (mant >> 11) & ((1LL << 52) - 1);
4387 #ifdef __arm__
4388 temp.l.upper = (e << 20) | (ll >> 32);
4389 temp.l.lower = ll;
4390 #else
4391 temp.ll = ll | ((uint64_t)e << 52);
4392 #endif
4393 return temp.d;
4396 #else
4398 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4400 CPU86_LDoubleU temp;
4402 temp.d = f;
4403 *pmant = temp.l.lower;
4404 *pexp = temp.l.upper;
4407 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4409 CPU86_LDoubleU temp;
4411 temp.l.upper = upper;
4412 temp.l.lower = mant;
4413 return temp.d;
4415 #endif
4417 #ifdef TARGET_X86_64
4419 //#define DEBUG_MULDIV
4421 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4423 *plow += a;
4424 /* carry test */
4425 if (*plow < a)
4426 (*phigh)++;
4427 *phigh += b;
4430 static void neg128(uint64_t *plow, uint64_t *phigh)
4432 *plow = ~ *plow;
4433 *phigh = ~ *phigh;
4434 add128(plow, phigh, 1, 0);
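/* 128-by-64-bit unsigned division by shift and subtract, used by the
   64-bit DIV helper; an overflowing quotient is reported to the caller,
   which raises the divide error. */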
4437 /* return TRUE if overflow */
4438 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4440 uint64_t q, r, a1, a0;
4441 int i, qb, ab;
4443 a0 = *plow;
4444 a1 = *phigh;
4445 if (a1 == 0) {
4446 q = a0 / b;
4447 r = a0 % b;
4448 *plow = q;
4449 *phigh = r;
4450 } else {
4451 if (a1 >= b)
4452 return 1;
4453 /* XXX: use a better algorithm */
4454 for(i = 0; i < 64; i++) {
4455 ab = a1 >> 63;
4456 a1 = (a1 << 1) | (a0 >> 63);
4457 if (ab || a1 >= b) {
4458 a1 -= b;
4459 qb = 1;
4460 } else {
4461 qb = 0;
4463 a0 = (a0 << 1) | qb;
4465 #if defined(DEBUG_MULDIV)
4466 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4467 *phigh, *plow, b, a0, a1);
4468 #endif
4469 *plow = a0;
4470 *phigh = a1;
4472 return 0;
4475 /* return TRUE if overflow */
4476 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4478 int sa, sb;
4479 sa = ((int64_t)*phigh < 0);
4480 if (sa)
4481 neg128(plow, phigh);
4482 sb = (b < 0);
4483 if (sb)
4484 b = -b;
4485 if (div64(plow, phigh, b) != 0)
4486 return 1;
4487 if (sa ^ sb) {
4488 if (*plow > (1ULL << 63))
4489 return 1;
4490 *plow = - *plow;
4491 } else {
4492 if (*plow >= (1ULL << 63))
4493 return 1;
4495 if (sa)
4496 *phigh = - *phigh;
4497 return 0;
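/* 64-bit MUL/IMUL/DIV/IDIV helpers.  The 128-bit product or dividend
   lives in RDX:RAX (the EAX/EDX macros name the full 64-bit registers
   here).  For MUL the high half is kept in CC_SRC so CF/OF can later be
   derived from "high half != 0"; for IMUL, CC_SRC is non-zero when the
   high half is not simply the sign extension of the low half, i.e. when
   the signed result overflows 64 bits.  The division helpers raise #DE
   both for a zero divisor and for a quotient that does not fit. */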
4500 void helper_mulq_EAX_T0(target_ulong t0)
4502 uint64_t r0, r1;
4504 mulu64(&r0, &r1, EAX, t0);
4505 EAX = r0;
4506 EDX = r1;
4507 CC_DST = r0;
4508 CC_SRC = r1;
4511 void helper_imulq_EAX_T0(target_ulong t0)
4513 uint64_t r0, r1;
4515 muls64(&r0, &r1, EAX, t0);
4516 EAX = r0;
4517 EDX = r1;
4518 CC_DST = r0;
4519 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4522 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4524 uint64_t r0, r1;
4526 muls64(&r0, &r1, t0, t1);
4527 CC_DST = r0;
4528 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4529 return r0;
4532 void helper_divq_EAX(target_ulong t0)
4534 uint64_t r0, r1;
4535 if (t0 == 0) {
4536 raise_exception(EXCP00_DIVZ);
4538 r0 = EAX;
4539 r1 = EDX;
4540 if (div64(&r0, &r1, t0))
4541 raise_exception(EXCP00_DIVZ);
4542 EAX = r0;
4543 EDX = r1;
4546 void helper_idivq_EAX(target_ulong t0)
4548 uint64_t r0, r1;
4549 if (t0 == 0) {
4550 raise_exception(EXCP00_DIVZ);
4552 r0 = EAX;
4553 r1 = EDX;
4554 if (idiv64(&r0, &r1, t0))
4555 raise_exception(EXCP00_DIVZ);
4556 EAX = r0;
4557 EDX = r1;
4559 #endif
4561 static void do_hlt(void)
4563 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4564 env->halted = 1;
4565 env->exception_index = EXCP_HLT;
4566 cpu_loop_exit();
4569 void helper_hlt(int next_eip_addend)
4571 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4572 EIP += next_eip_addend;
4574 do_hlt();
4577 void helper_monitor(target_ulong ptr)
4579 if ((uint32_t)ECX != 0)
4580 raise_exception(EXCP0D_GPF);
4581 /* XXX: store address? */
4582 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4585 void helper_mwait(int next_eip_addend)
4587 if ((uint32_t)ECX != 0)
4588 raise_exception(EXCP0D_GPF);
4589 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4590 EIP += next_eip_addend;
4592 /* XXX: not complete but not completely erroneous */
4593 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4594 /* more than one CPU: do not sleep because another CPU may
4595 wake this one */
4596 } else {
4597 do_hlt();
4601 void helper_debug(void)
4603 env->exception_index = EXCP_DEBUG;
4604 cpu_loop_exit();
4607 void helper_raise_interrupt(int intno, int next_eip_addend)
4609 raise_interrupt(intno, 1, 0, next_eip_addend);
4612 void helper_raise_exception(int exception_index)
4614 raise_exception(exception_index);
4617 void helper_cli(void)
4619 env->eflags &= ~IF_MASK;
4622 void helper_sti(void)
4624 env->eflags |= IF_MASK;
4627 #if 0
4628 /* vm86plus instructions */
4629 void helper_cli_vm(void)
4631 env->eflags &= ~VIF_MASK;
4634 void helper_sti_vm(void)
4636 env->eflags |= VIF_MASK;
4637 if (env->eflags & VIP_MASK) {
4638 raise_exception(EXCP0D_GPF);
4641 #endif
4643 void helper_set_inhibit_irq(void)
4645 env->hflags |= HF_INHIBIT_IRQ_MASK;
4648 void helper_reset_inhibit_irq(void)
4650 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4653 void helper_boundw(target_ulong a0, int v)
4655 int low, high;
4656 low = ldsw(a0);
4657 high = ldsw(a0 + 2);
4658 v = (int16_t)v;
4659 if (v < low || v > high) {
4660 raise_exception(EXCP05_BOUND);
4662 FORCE_RET();
4665 void helper_boundl(target_ulong a0, int v)
4667 int low, high;
4668 low = ldl(a0);
4669 high = ldl(a0 + 4);
4670 if (v < low || v > high) {
4671 raise_exception(EXCP05_BOUND);
4673 FORCE_RET();
4676 static float approx_rsqrt(float a)
4678 return 1.0 / sqrt(a);
4681 static float approx_rcp(float a)
4683 return 1.0 / a;
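/* approx_rsqrt and approx_rcp back the SSE RSQRTPS/RSQRTSS and
   RCPPS/RCPSS handlers in ops_sse.h.  Real hardware only guarantees
   roughly 12 bits of precision for these approximation instructions;
   here the exact reciprocal (square root) is returned instead, a
   deliberate simplification. */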
4686 #if !defined(CONFIG_USER_ONLY)
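/* Instantiate the softmmu load/store slow paths for this target: each
   inclusion of softmmu_template.h generates the helpers for one access
   size, with SHIFT being log2 of the size in bytes (0 = byte, 1 = word,
   2 = long, 3 = quad). */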
4688 #define MMUSUFFIX _mmu
4690 #define SHIFT 0
4691 #include "softmmu_template.h"
4693 #define SHIFT 1
4694 #include "softmmu_template.h"
4696 #define SHIFT 2
4697 #include "softmmu_template.h"
4699 #define SHIFT 3
4700 #include "softmmu_template.h"
4702 #endif
4704 /* try to fill the TLB and raise an exception on error. If retaddr is
4705    NULL, it means that the function was called from C code (i.e. not
4706    from generated code or from helper.c) */
4707 /* XXX: fix it to restore all registers */
4708 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4710 TranslationBlock *tb;
4711 int ret;
4712 unsigned long pc;
4713 CPUX86State *saved_env;
4715 /* XXX: hack to restore env in all cases, even if not called from
4716 generated code */
4717 saved_env = env;
4718 env = cpu_single_env;
4720 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4721 if (ret) {
4722 if (retaddr) {
4723 /* now we have a real cpu fault */
4724 pc = (unsigned long)retaddr;
4725 tb = tb_find_pc(pc);
4726 if (tb) {
4727 /* the PC is inside the translated code. It means that we have
4728 a virtual CPU fault */
4729 cpu_restore_state(tb, env, pc, NULL);
4732 raise_exception_err(env->exception_index, env->error_code);
4734 env = saved_env;
4738 /* Secure Virtual Machine helpers */
4740 #if defined(CONFIG_USER_ONLY)
4742 void helper_vmrun(int aflag, int next_eip_addend)
4745 void helper_vmmcall(void)
4748 void helper_vmload(int aflag)
4751 void helper_vmsave(int aflag)
4754 void helper_stgi(void)
4757 void helper_clgi(void)
4760 void helper_skinit(void)
4763 void helper_invlpga(int aflag)
4766 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4769 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4773 void helper_svm_check_io(uint32_t port, uint32_t param,
4774 uint32_t next_eip_addend)
4777 #else
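/* The VMCB stores segment attributes in a packed 12-bit "attrib" field:
   bits 0-7 hold the descriptor access byte and bits 8-11 the AVL/L/DB/G
   nibble.  svm_save_seg and svm_load_seg translate between that layout
   and the CPU segment cache flags, which keep the same bits at their
   original descriptor positions 8-23. */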
4779 static inline void svm_save_seg(target_phys_addr_t addr,
4780 const SegmentCache *sc)
4782 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4783 sc->selector);
4784 stq_phys(addr + offsetof(struct vmcb_seg, base),
4785 sc->base);
4786 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4787 sc->limit);
4788 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4789 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4792 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4794 unsigned int flags;
4796 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4797 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4798 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4799 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4800 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4803 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4804 CPUState *env, int seg_reg)
4806 SegmentCache sc1, *sc = &sc1;
4807 svm_load_seg(addr, sc);
4808 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4809 sc->base, sc->limit, sc->flags);
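/* VMRUN: save the host state into the host save area (vm_hsave), cache
   the intercept bitmaps from the VMCB pointed to by rAX, load the guest
   state from that VMCB, set the global interrupt flag and, if the
   EVENTINJ field is valid, inject the requested event before resuming
   the guest. */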
4812 void helper_vmrun(int aflag, int next_eip_addend)
4814 target_ulong addr;
4815 uint32_t event_inj;
4816 uint32_t int_ctl;
4818 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4820 if (aflag == 2)
4821 addr = EAX;
4822 else
4823 addr = (uint32_t)EAX;
4825 if (loglevel & CPU_LOG_TB_IN_ASM)
4826 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4828 env->vm_vmcb = addr;
4830 /* save the current CPU state in the hsave page */
4831 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4832 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4834 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4835 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4837 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4838 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4839 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4840 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4841 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4842 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4844 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4845 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4847 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4848 &env->segs[R_ES]);
4849 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4850 &env->segs[R_CS]);
4851 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4852 &env->segs[R_SS]);
4853 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4854 &env->segs[R_DS]);
4856 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4857 EIP + next_eip_addend);
4858 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4859 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4861 /* load the interception bitmaps so we do not need to access the
4862 vmcb in svm mode */
4863 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4864 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4865 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4866 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4867 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4868 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4870 /* enable intercepts */
4871 env->hflags |= HF_SVMI_MASK;
4873 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4875 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4876 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4878 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4879 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4881 /* clear exit_info_2 so we behave like the real hardware */
4882 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4884 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4885 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4886 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4887 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4888 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4889 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4890 if (int_ctl & V_INTR_MASKING_MASK) {
4891 env->v_tpr = int_ctl & V_TPR_MASK;
4892 env->hflags2 |= HF2_VINTR_MASK;
4893 if (env->eflags & IF_MASK)
4894 env->hflags2 |= HF2_HIF_MASK;
4897 cpu_load_efer(env,
4898 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4899 env->eflags = 0;
4900 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4901 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4902 CC_OP = CC_OP_EFLAGS;
4904 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4905 env, R_ES);
4906 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4907 env, R_CS);
4908 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4909 env, R_SS);
4910 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4911 env, R_DS);
4913 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4914 env->eip = EIP;
4915 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4916 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4917 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4918 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4919 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4921 /* FIXME: guest state consistency checks */
4923 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4924 case TLB_CONTROL_DO_NOTHING:
4925 break;
4926 case TLB_CONTROL_FLUSH_ALL_ASID:
4927 /* FIXME: this is not 100% correct but should work for now */
4928 tlb_flush(env, 1);
4929 break;
4932 env->hflags2 |= HF2_GIF_MASK;
4934 if (int_ctl & V_IRQ_MASK) {
4935 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4938 /* maybe we need to inject an event */
4939 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4940 if (event_inj & SVM_EVTINJ_VALID) {
4941 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4942 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4943 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4944 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4946 if (loglevel & CPU_LOG_TB_IN_ASM)
4947 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4948 /* FIXME: need to implement valid_err */
4949 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4950 case SVM_EVTINJ_TYPE_INTR:
4951 env->exception_index = vector;
4952 env->error_code = event_inj_err;
4953 env->exception_is_int = 0;
4954 env->exception_next_eip = -1;
4955 if (loglevel & CPU_LOG_TB_IN_ASM)
4956 fprintf(logfile, "INTR");
4957 /* XXX: is it always correct? */
4958 do_interrupt(vector, 0, 0, 0, 1);
4959 break;
4960 case SVM_EVTINJ_TYPE_NMI:
4961 env->exception_index = EXCP02_NMI;
4962 env->error_code = event_inj_err;
4963 env->exception_is_int = 0;
4964 env->exception_next_eip = EIP;
4965 if (loglevel & CPU_LOG_TB_IN_ASM)
4966 fprintf(logfile, "NMI");
4967 cpu_loop_exit();
4968 break;
4969 case SVM_EVTINJ_TYPE_EXEPT:
4970 env->exception_index = vector;
4971 env->error_code = event_inj_err;
4972 env->exception_is_int = 0;
4973 env->exception_next_eip = -1;
4974 if (loglevel & CPU_LOG_TB_IN_ASM)
4975 fprintf(logfile, "EXEPT");
4976 cpu_loop_exit();
4977 break;
4978 case SVM_EVTINJ_TYPE_SOFT:
4979 env->exception_index = vector;
4980 env->error_code = event_inj_err;
4981 env->exception_is_int = 1;
4982 env->exception_next_eip = EIP;
4983 if (loglevel & CPU_LOG_TB_IN_ASM)
4984 fprintf(logfile, "SOFT");
4985 cpu_loop_exit();
4986 break;
4988 if (loglevel & CPU_LOG_TB_IN_ASM)
4989 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4993 void helper_vmmcall(void)
4995 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4996 raise_exception(EXCP06_ILLOP);
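/* VMLOAD/VMSAVE transfer the processor state that VMRUN and #VMEXIT do
   not switch: FS, GS, TR and LDTR including their hidden parts, plus
   KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs. */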
4999 void helper_vmload(int aflag)
5001 target_ulong addr;
5002 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5004 if (aflag == 2)
5005 addr = EAX;
5006 else
5007 addr = (uint32_t)EAX;
5009 if (loglevel & CPU_LOG_TB_IN_ASM)
5010 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5011 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5012 env->segs[R_FS].base);
5014 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5015 env, R_FS);
5016 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5017 env, R_GS);
5018 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5019 &env->tr);
5020 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5021 &env->ldt);
5023 #ifdef TARGET_X86_64
5024 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5025 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5026 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5027 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5028 #endif
5029 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5030 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5031 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5032 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5035 void helper_vmsave(int aflag)
5037 target_ulong addr;
5038 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5040 if (aflag == 2)
5041 addr = EAX;
5042 else
5043 addr = (uint32_t)EAX;
5045 if (loglevel & CPU_LOG_TB_IN_ASM)
5046 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5047 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5048 env->segs[R_FS].base);
5050 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5051 &env->segs[R_FS]);
5052 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5053 &env->segs[R_GS]);
5054 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5055 &env->tr);
5056 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5057 &env->ldt);
5059 #ifdef TARGET_X86_64
5060 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5061 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5062 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5063 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5064 #endif
5065 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5066 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5067 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5068 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5071 void helper_stgi(void)
5073 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5074 env->hflags2 |= HF2_GIF_MASK;
5077 void helper_clgi(void)
5079 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5080 env->hflags2 &= ~HF2_GIF_MASK;
5083 void helper_skinit(void)
5085 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5086 /* XXX: not implemented */
5087 raise_exception(EXCP06_ILLOP);
5090 void helper_invlpga(int aflag)
5092 target_ulong addr;
5093 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5095 if (aflag == 2)
5096 addr = EAX;
5097 else
5098 addr = (uint32_t)EAX;
5100 /* XXX: could use the ASID to see whether the flush is actually
5101    needed */
5102 tlb_flush_page(env, addr);
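/* helper_svm_check_intercept_param is called on potentially intercepted
   operations while the guest runs (HF_SVMI_MASK set).  CR/DR accesses
   and exceptions are checked against the per-bit masks cached at VMRUN;
   MSR accesses additionally consult the MSR permission map, which holds
   two bits (read/write) per MSR in three 2K regions covering MSRs
   0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff, with
   "param" selecting the read (0) or write (1) bit.  A set bit triggers
   a #VMEXIT with the given exit code. */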
5105 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5107 if (likely(!(env->hflags & HF_SVMI_MASK)))
5108 return;
5109 switch(type) {
5110 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5111 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5112 helper_vmexit(type, param);
5114 break;
5115 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5116 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5117 helper_vmexit(type, param);
5119 break;
5120 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5121 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5122 helper_vmexit(type, param);
5124 break;
5125 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5126 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5127 helper_vmexit(type, param);
5129 break;
5130 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5131 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5132 helper_vmexit(type, param);
5134 break;
5135 case SVM_EXIT_MSR:
5136 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5137 /* FIXME: this should be read in at vmrun (faster this way?) */
5138 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5139 uint32_t t0, t1;
5140 switch((uint32_t)ECX) {
5141 case 0 ... 0x1fff:
5142 t0 = (ECX * 2) % 8;
5143 t1 = (ECX * 2) / 8;
5144 break;
5145 case 0xc0000000 ... 0xc0001fff:
5146 t0 = (8192 + ECX - 0xc0000000) * 2;
5147 t1 = (t0 / 8);
5148 t0 %= 8;
5149 break;
5150 case 0xc0010000 ... 0xc0011fff:
5151 t0 = (16384 + ECX - 0xc0010000) * 2;
5152 t1 = (t0 / 8);
5153 t0 %= 8;
5154 break;
5155 default:
5156 helper_vmexit(type, param);
5157 t0 = 0;
5158 t1 = 0;
5159 break;
5161 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5162 helper_vmexit(type, param);
5164 break;
5165 default:
5166 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5167 helper_vmexit(type, param);
5169 break;
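/* I/O intercepts use the I/O permission map with one bit per port.
   "param" is the IOIO exit information word; its size field (bits 4-6)
   is expanded into a mask so that accesses spanning several ports are
   caught too.  On an intercept, exit_info_2 receives the address of the
   next instruction and exit_info_1 carries the IOIO word with the port
   number in bits 16-31. */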
5173 void helper_svm_check_io(uint32_t port, uint32_t param,
5174 uint32_t next_eip_addend)
5176 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5177 /* FIXME: this should be read in at vmrun (faster this way?) */
5178 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5179 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5180 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5181 /* next EIP */
5182 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5183 env->eip + next_eip_addend);
5184 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5189 /* Note: currently only 32 bits of exit_code are used */
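/* On #VMEXIT the guest state and the exit code/exit_info_1 are stored
   back into the VMCB, the cached intercepts and GIF are cleared, the
   host state saved by VMRUN is reloaded from vm_hsave, and execution
   resumes in the host with any pending exception discarded. */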
5190 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5192 uint32_t int_ctl;
5194 if (loglevel & CPU_LOG_TB_IN_ASM)
5195 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5196 exit_code, exit_info_1,
5197 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5198 EIP);
5200 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5201 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5202 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5203 } else {
5204 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5207 /* Save the VM state in the vmcb */
5208 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5209 &env->segs[R_ES]);
5210 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5211 &env->segs[R_CS]);
5212 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5213 &env->segs[R_SS]);
5214 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5215 &env->segs[R_DS]);
5217 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5218 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5220 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5221 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5223 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5224 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5225 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5226 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5227 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5229 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5230 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5231 int_ctl |= env->v_tpr & V_TPR_MASK;
5232 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5233 int_ctl |= V_IRQ_MASK;
5234 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5236 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5237 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5238 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5239 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5240 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5241 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5242 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5244 /* Reload the host state from vm_hsave */
5245 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5246 env->hflags &= ~HF_SVMI_MASK;
5247 env->intercept = 0;
5248 env->intercept_exceptions = 0;
5249 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5250 env->tsc_offset = 0;
5252 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5253 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5255 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5256 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5258 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5259 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5260 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5261 /* we need to set the efer after the crs so the hidden flags get
5262 set properly */
5263 cpu_load_efer(env,
5264 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5265 env->eflags = 0;
5266 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5267 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5268 CC_OP = CC_OP_EFLAGS;
5270 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5271 env, R_ES);
5272 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5273 env, R_CS);
5274 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5275 env, R_SS);
5276 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5277 env, R_DS);
5279 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5280 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5281 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5283 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5284 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5286 /* other setups */
5287 cpu_x86_set_cpl(env, 0);
5288 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5289 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5291 env->hflags2 &= ~HF2_GIF_MASK;
5292 /* FIXME: Resets the current ASID register to zero (host ASID). */
5294 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5296 /* Clears the TSC_OFFSET inside the processor. */
5298 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5299    from the page table indicated by the host's CR3. If the PDPEs contain
5300    illegal state, the processor causes a shutdown. */
5302 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5303 env->cr[0] |= CR0_PE_MASK;
5304 env->eflags &= ~VM_MASK;
5306 /* Disables all breakpoints in the host DR7 register. */
5308 /* Checks the reloaded host state for consistency. */
5310 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5311    host's code segment or non-canonical (in the case of long mode), a
5312    #GP fault is delivered inside the host. */
5314 /* remove any pending exception */
5315 env->exception_index = -1;
5316 env->error_code = 0;
5317 env->old_exception = -1;
5319 cpu_loop_exit();
5322 #endif
5324 /* MMX/SSE */
5325 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
5326 void helper_enter_mmx(void)
5328 env->fpstt = 0;
5329 *(uint32_t *)(env->fptags) = 0;
5330 *(uint32_t *)(env->fptags + 4) = 0;
5333 void helper_emms(void)
5335 /* set to empty state */
5336 *(uint32_t *)(env->fptags) = 0x01010101;
5337 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5340 /* XXX: suppress */
5341 void helper_movq(uint64_t *d, uint64_t *s)
5343 *d = *s;
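/* ops_sse.h is included twice to generate both the 64-bit MMX (SHIFT 0)
   and the 128-bit SSE (SHIFT 1) variants of the vector helpers;
   helper_template.h is likewise expanded for 8, 16, 32 and, on x86_64,
   64-bit operand sizes. */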
5346 #define SHIFT 0
5347 #include "ops_sse.h"
5349 #define SHIFT 1
5350 #include "ops_sse.h"
5352 #define SHIFT 0
5353 #include "helper_template.h"
5354 #undef SHIFT
5356 #define SHIFT 1
5357 #include "helper_template.h"
5358 #undef SHIFT
5360 #define SHIFT 2
5361 #include "helper_template.h"
5362 #undef SHIFT
5364 #ifdef TARGET_X86_64
5366 #define SHIFT 3
5367 #include "helper_template.h"
5368 #undef SHIFT
5370 #endif
5372 /* bit operations */
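/* helper_bsf and helper_bsr return the index of the lowest and highest
   set bit respectively.  The loops assume a non-zero operand; the
   BSF/BSR translation is expected to handle the zero case itself (on
   x86 the destination is then undefined and ZF is set). */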
5373 target_ulong helper_bsf(target_ulong t0)
5375 int count;
5376 target_ulong res;
5378 res = t0;
5379 count = 0;
5380 while ((res & 1) == 0) {
5381 count++;
5382 res >>= 1;
5384 return count;
5387 target_ulong helper_bsr(target_ulong t0)
5389 int count;
5390 target_ulong res, mask;
5392 res = t0;
5393 count = TARGET_LONG_BITS - 1;
5394 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5395 while ((res & mask) == 0) {
5396 count--;
5397 res <<= 1;
5399 return count;
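/* Lazy condition code evaluation: cc_table maps the current CC_OP value
   to a pair of functions that recompute either all of the arithmetic
   EFLAGS bits or just CF from the saved CC_SRC/CC_DST operands. */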
5403 static int compute_all_eflags(void)
5405 return CC_SRC;
5408 static int compute_c_eflags(void)
5410 return CC_SRC & CC_C;
5413 CCTable cc_table[CC_OP_NB] = {
5414 [CC_OP_DYNAMIC] = { /* should never happen */ },
5416 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
5418 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
5419 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
5420 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
5422 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
5423 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
5424 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
5426 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
5427 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
5428 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
5430 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
5431 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
5432 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
5434 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
5435 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
5436 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
5438 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
5439 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
5440 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
5442 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
5443 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
5444 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
5446 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
5447 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
5448 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
5450 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
5451 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
5452 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
5454 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
5455 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
5456 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
5458 #ifdef TARGET_X86_64
5459 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
5461 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
5463 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
5465 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
5467 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
5469 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
5471 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
5473 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
5475 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
5477 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
5478 #endif