Do not use load_seg_vm to load CS in real mode iret handling
[qemu/mini2440.git] / target-i386 / op_helper.c
blob e9a6942440236d95f7e8b87002fafd3fca53c380
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #define CPU_NO_GLOBAL_REGS
21 #include "exec.h"
22 #include "host-utils.h"
24 //#define DEBUG_PCALL
26 #if 0
27 #define raise_exception_err(a, b)\
28 do {\
29 if (logfile)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
32 } while (0)
33 #endif
35 const uint8_t parity_table[256] = {
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
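/*
 * parity_table[b] holds CC_P when byte value b contains an even number of
 * set bits, matching the x86 PF definition (PF only reflects the low 8 bits
 * of a result).  Illustrative sketch, not part of the original file: the
 * table could equally be generated at startup by a hypothetical helper
 * along these lines.
 */
#if 0
static void build_parity_table(uint8_t *table)
{
    int i, n, bits;

    for (i = 0; i < 256; i++) {
        bits = 0;
        for (n = 0; n < 8; n++)
            bits += (i >> n) & 1;
        /* an even number of 1 bits sets PF */
        table[i] = (bits & 1) ? 0 : CC_P;
    }
}
#endif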
70 /* modulo 17 table */
71 const uint8_t rclw_table[32] = {
72 0, 1, 2, 3, 4, 5, 6, 7,
73 8, 9,10,11,12,13,14,15,
74 16, 0, 1, 2, 3, 4, 5, 6,
75 7, 8, 9,10,11,12,13,14,
78 /* modulo 9 table */
79 const uint8_t rclb_table[32] = {
80 0, 1, 2, 3, 4, 5, 6, 7,
81 8, 0, 1, 2, 3, 4, 5, 6,
82 7, 8, 0, 1, 2, 3, 4, 5,
83 6, 7, 8, 0, 1, 2, 3, 4,
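/*
 * RCL rotates through CF, so an N-bit rotate has period N+1: rclw_table
 * maps a masked 5-bit count to count mod 17 for 16-bit operands, and
 * rclb_table maps it to count mod 9 for 8-bit operands.
 */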
86 const CPU86_LDouble f15rk[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
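/*
 * f15rk[] holds the constants used by the FPU load-constant instructions
 * FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T: 0, 1, pi,
 * log10(2), ln(2), log2(e) and log2(10).
 */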
97 /* broken thread support */
99 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock);
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock);
111 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
113 load_eflags(t0, update_mask);
116 target_ulong helper_read_eflags(void)
118 uint32_t eflags;
119 eflags = cc_table[CC_OP].compute_all();
120 eflags |= (DF & DF_MASK);
121 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
122 return eflags;
125 /* return non-zero on error */
126 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
127 int selector)
129 SegmentCache *dt;
130 int index;
131 target_ulong ptr;
133 if (selector & 0x4)
134 dt = &env->ldt;
135 else
136 dt = &env->gdt;
137 index = selector & ~7;
138 if ((index + 7) > dt->limit)
139 return -1;
140 ptr = dt->base + index;
141 *e1_ptr = ldl_kernel(ptr);
142 *e2_ptr = ldl_kernel(ptr + 4);
143 return 0;
146 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
148 unsigned int limit;
149 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
150 if (e2 & DESC_G_MASK)
151 limit = (limit << 12) | 0xfff;
152 return limit;
155 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
157 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
160 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
162 sc->base = get_seg_base(e1, e2);
163 sc->limit = get_seg_limit(e1, e2);
164 sc->flags = e2;
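/*
 * Descriptor layout recap: e1 is the low descriptor word and e2 the high
 * one.  The 32-bit base is split across e1[31:16], e2[7:0] and e2[31:24];
 * the 20-bit limit across e1[15:0] and e2[19:16], scaled by 4K when the
 * granularity bit (DESC_G_MASK) is set.
 */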
167 /* init the segment cache in vm86 mode. */
168 static inline void load_seg_vm(int seg, int selector)
170 selector &= 0xffff;
171 cpu_x86_load_seg_cache(env, seg, selector,
172 (selector << 4), 0xffff, 0);
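/*
 * load_seg_vm() gives a segment the real/vm86-style cache: base equal to
 * selector << 4, a 64K limit and zeroed flags.  As the commit subject
 * notes, it must not be used to load CS in the real-mode iret path,
 * presumably because the zeroed flags would discard the cached CS
 * attributes (segment width and privilege information) that the rest of
 * the emulation relies on.
 */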
175 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
176 uint32_t *esp_ptr, int dpl)
178 int type, index, shift;
180 #if 0
182 int i;
183 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
184 for(i=0;i<env->tr.limit;i++) {
185 printf("%02x ", env->tr.base[i]);
186 if ((i & 7) == 7) printf("\n");
188 printf("\n");
190 #endif
192 if (!(env->tr.flags & DESC_P_MASK))
193 cpu_abort(env, "invalid tss");
194 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
195 if ((type & 7) != 1)
196 cpu_abort(env, "invalid tss type");
197 shift = type >> 3;
198 index = (dpl * 4 + 2) << shift;
199 if (index + (4 << shift) - 1 > env->tr.limit)
200 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
201 if (shift == 0) {
202 *esp_ptr = lduw_kernel(env->tr.base + index);
203 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
204 } else {
205 *esp_ptr = ldl_kernel(env->tr.base + index);
206 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
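/*
 * For a 32-bit TSS (shift = 1) the index computed above resolves to the
 * architectural offsets ESP0/SS0 at 4/8, ESP1/SS1 at 12/16 and ESP2/SS2 at
 * 20/24; the 16-bit TSS (shift = 0) uses the corresponding 2-byte fields
 * starting at offset 2.
 */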
210 /* XXX: merge with load_seg() */
211 static void tss_load_seg(int seg_reg, int selector)
213 uint32_t e1, e2;
214 int rpl, dpl, cpl;
216 if ((selector & 0xfffc) != 0) {
217 if (load_segment(&e1, &e2, selector) != 0)
218 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
219 if (!(e2 & DESC_S_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 rpl = selector & 3;
222 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
223 cpl = env->hflags & HF_CPL_MASK;
224 if (seg_reg == R_CS) {
225 if (!(e2 & DESC_CS_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* XXX: is it correct ? */
228 if (dpl != rpl)
229 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
230 if ((e2 & DESC_C_MASK) && dpl > rpl)
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
232 } else if (seg_reg == R_SS) {
233 /* SS must be writable data */
234 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
235 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
236 if (dpl != cpl || dpl != rpl)
237 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
238 } else {
239 /* not readable code */
240 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 /* if data or non-conforming code, check the rights */
243 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
244 if (dpl < cpl || dpl < rpl)
245 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 if (!(e2 & DESC_P_MASK))
249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
250 cpu_x86_load_seg_cache(env, seg_reg, selector,
251 get_seg_base(e1, e2),
252 get_seg_limit(e1, e2),
253 e2);
254 } else {
255 if (seg_reg == R_SS || seg_reg == R_CS)
256 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
260 #define SWITCH_TSS_JMP 0
261 #define SWITCH_TSS_IRET 1
262 #define SWITCH_TSS_CALL 2
264 /* XXX: restore CPU state in registers (PowerPC case) */
265 static void switch_tss(int tss_selector,
266 uint32_t e1, uint32_t e2, int source,
267 uint32_t next_eip)
269 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
270 target_ulong tss_base;
271 uint32_t new_regs[8], new_segs[6];
272 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
273 uint32_t old_eflags, eflags_mask;
274 SegmentCache *dt;
275 int index;
276 target_ulong ptr;
278 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
279 #ifdef DEBUG_PCALL
280 if (loglevel & CPU_LOG_PCALL)
281 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
282 #endif
284 /* if task gate, we read the TSS segment and we load it */
285 if (type == 5) {
286 if (!(e2 & DESC_P_MASK))
287 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
288 tss_selector = e1 >> 16;
289 if (tss_selector & 4)
290 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
291 if (load_segment(&e1, &e2, tss_selector) != 0)
292 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
293 if (e2 & DESC_S_MASK)
294 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
295 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
296 if ((type & 7) != 1)
297 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
300 if (!(e2 & DESC_P_MASK))
301 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
303 if (type & 8)
304 tss_limit_max = 103;
305 else
306 tss_limit_max = 43;
307 tss_limit = get_seg_limit(e1, e2);
308 tss_base = get_seg_base(e1, e2);
309 if ((tss_selector & 4) != 0 ||
310 tss_limit < tss_limit_max)
311 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
312 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
313 if (old_type & 8)
314 old_tss_limit_max = 103;
315 else
316 old_tss_limit_max = 43;
318 /* read all the registers from the new TSS */
319 if (type & 8) {
320 /* 32 bit */
321 new_cr3 = ldl_kernel(tss_base + 0x1c);
322 new_eip = ldl_kernel(tss_base + 0x20);
323 new_eflags = ldl_kernel(tss_base + 0x24);
324 for(i = 0; i < 8; i++)
325 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
326 for(i = 0; i < 6; i++)
327 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
328 new_ldt = lduw_kernel(tss_base + 0x60);
329 new_trap = ldl_kernel(tss_base + 0x64);
330 } else {
331 /* 16 bit */
332 new_cr3 = 0;
333 new_eip = lduw_kernel(tss_base + 0x0e);
334 new_eflags = lduw_kernel(tss_base + 0x10);
335 for(i = 0; i < 8; i++)
336 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
337 for(i = 0; i < 4; i++)
338 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
339 new_ldt = lduw_kernel(tss_base + 0x2a);
340 new_segs[R_FS] = 0;
341 new_segs[R_GS] = 0;
342 new_trap = 0;
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses beforehand */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to validate the TLB after the accesses have been done */
350 v1 = ldub_kernel(env->tr.base);
351 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
352 stb_kernel(env->tr.base, v1);
353 stb_kernel(env->tr.base + old_tss_limit_max, v2);
355 /* clear busy bit (it is restartable) */
356 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
357 target_ulong ptr;
358 uint32_t e2;
359 ptr = env->gdt.base + (env->tr.selector & ~7);
360 e2 = ldl_kernel(ptr + 4);
361 e2 &= ~DESC_TSS_BUSY_MASK;
362 stl_kernel(ptr + 4, e2);
364 old_eflags = compute_eflags();
365 if (source == SWITCH_TSS_IRET)
366 old_eflags &= ~NT_MASK;
368 /* save the current state in the old TSS */
369 if (type & 8) {
370 /* 32 bit */
371 stl_kernel(env->tr.base + 0x20, next_eip);
372 stl_kernel(env->tr.base + 0x24, old_eflags);
373 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
374 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
375 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
376 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
377 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
378 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
379 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
380 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
381 for(i = 0; i < 6; i++)
382 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
383 } else {
384 /* 16 bit */
385 stw_kernel(env->tr.base + 0x0e, next_eip);
386 stw_kernel(env->tr.base + 0x10, old_eflags);
387 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
388 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
389 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
390 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
391 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
392 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
393 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
394 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
395 for(i = 0; i < 4; i++)
396 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
399 /* now if an exception occurs, it will occur in the next task's
400 context */
402 if (source == SWITCH_TSS_CALL) {
403 stw_kernel(tss_base, env->tr.selector);
404 new_eflags |= NT_MASK;
407 /* set busy bit */
408 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
409 target_ulong ptr;
410 uint32_t e2;
411 ptr = env->gdt.base + (tss_selector & ~7);
412 e2 = ldl_kernel(ptr + 4);
413 e2 |= DESC_TSS_BUSY_MASK;
414 stl_kernel(ptr + 4, e2);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env->cr[0] |= CR0_TS_MASK;
420 env->hflags |= HF_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
427 cpu_x86_update_cr3(env, new_cr3);
430 /* first load the registers that cannot fault, then reload the ones
431 that may raise exceptions */
432 env->eip = new_eip;
433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
435 if (!(type & 8))
436 eflags_mask &= 0xffff;
437 load_eflags(new_eflags, eflags_mask);
438 /* XXX: what to do in 16 bit case ? */
439 EAX = new_regs[0];
440 ECX = new_regs[1];
441 EDX = new_regs[2];
442 EBX = new_regs[3];
443 ESP = new_regs[4];
444 EBP = new_regs[5];
445 ESI = new_regs[6];
446 EDI = new_regs[7];
447 if (new_eflags & VM_MASK) {
448 for(i = 0; i < 6; i++)
449 load_seg_vm(i, new_segs[i]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env, 3);
452 } else {
453 /* CPL is set to the RPL of CS */
454 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
455 /* load just the selectors first, as loading the rest may trigger exceptions */
456 for(i = 0; i < 6; i++)
457 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
460 env->ldt.selector = new_ldt & ~4;
461 env->ldt.base = 0;
462 env->ldt.limit = 0;
463 env->ldt.flags = 0;
465 /* load the LDT */
466 if (new_ldt & 4)
467 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
469 if ((new_ldt & 0xfffc) != 0) {
470 dt = &env->gdt;
471 index = new_ldt & ~7;
472 if ((index + 7) > dt->limit)
473 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474 ptr = dt->base + index;
475 e1 = ldl_kernel(ptr);
476 e2 = ldl_kernel(ptr + 4);
477 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
479 if (!(e2 & DESC_P_MASK))
480 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481 load_seg_cache_raw_dt(&env->ldt, e1, e2);
484 /* load the segments */
485 if (!(new_eflags & VM_MASK)) {
486 tss_load_seg(R_CS, new_segs[R_CS]);
487 tss_load_seg(R_SS, new_segs[R_SS]);
488 tss_load_seg(R_ES, new_segs[R_ES]);
489 tss_load_seg(R_DS, new_segs[R_DS]);
490 tss_load_seg(R_FS, new_segs[R_FS]);
491 tss_load_seg(R_GS, new_segs[R_GS]);
494 /* check that EIP is in the CS segment limits */
495 if (new_eip > env->segs[R_CS].limit) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF, 0);
501 /* check if Port I/O is allowed in TSS */
502 static inline void check_io(int addr, int size)
504 int io_offset, val, mask;
506 /* TSS must be a valid 32 bit one */
507 if (!(env->tr.flags & DESC_P_MASK) ||
508 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
509 env->tr.limit < 103)
510 goto fail;
511 io_offset = lduw_kernel(env->tr.base + 0x66);
512 io_offset += (addr >> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset + 1) > env->tr.limit)
515 goto fail;
516 val = lduw_kernel(env->tr.base + io_offset);
517 val >>= (addr & 7);
518 mask = (1 << size) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val & mask) != 0) {
521 fail:
522 raise_exception_err(EXCP0D_GPF, 0);
526 void helper_check_iob(uint32_t t0)
528 check_io(t0, 1);
531 void helper_check_iow(uint32_t t0)
533 check_io(t0, 2);
536 void helper_check_iol(uint32_t t0)
538 check_io(t0, 4);
541 void helper_outb(uint32_t port, uint32_t data)
543 cpu_outb(env, port, data & 0xff);
546 target_ulong helper_inb(uint32_t port)
548 return cpu_inb(env, port);
551 void helper_outw(uint32_t port, uint32_t data)
553 cpu_outw(env, port, data & 0xffff);
556 target_ulong helper_inw(uint32_t port)
558 return cpu_inw(env, port);
561 void helper_outl(uint32_t port, uint32_t data)
563 cpu_outl(env, port, data);
566 target_ulong helper_inl(uint32_t port)
568 return cpu_inl(env, port);
571 static inline unsigned int get_sp_mask(unsigned int e2)
573 if (e2 & DESC_B_MASK)
574 return 0xffffffff;
575 else
576 return 0xffff;
579 #ifdef TARGET_X86_64
580 #define SET_ESP(val, sp_mask)\
581 do {\
582 if ((sp_mask) == 0xffff)\
583 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584 else if ((sp_mask) == 0xffffffffLL)\
585 ESP = (uint32_t)(val);\
586 else\
587 ESP = (val);\
588 } while (0)
589 #else
590 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
591 #endif
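/*
 * SET_ESP only replaces the bits selected by sp_mask, so 16-bit stack
 * updates leave the upper half of ESP untouched; on x86_64 the full 32-bit
 * mask is special-cased because a 32-bit write must zero-extend into RSP.
 */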
593 /* on 64-bit machines this addition can overflow, so this segment addition
594 * macro is used to truncate the value to 32 bits whenever needed */
595 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
597 /* XXX: add an is_user flag to have proper security support */
598 #define PUSHW(ssp, sp, sp_mask, val)\
600 sp -= 2;\
601 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
604 #define PUSHL(ssp, sp, sp_mask, val)\
606 sp -= 4;\
607 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
610 #define POPW(ssp, sp, sp_mask, val)\
612 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
613 sp += 2;\
616 #define POPL(ssp, sp, sp_mask, val)\
618 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
619 sp += 4;\
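/*
 * The PUSHW/PUSHL/POPW/POPL macros above only update the local 'sp' copy;
 * callers such as do_interrupt_protected() commit the final value with
 * SET_ESP() once the whole frame has been written, so a fault halfway
 * through leaves the architectural ESP unchanged.
 */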
622 /* protected mode interrupt */
623 static void do_interrupt_protected(int intno, int is_int, int error_code,
624 unsigned int next_eip, int is_hw)
626 SegmentCache *dt;
627 target_ulong ptr, ssp;
628 int type, dpl, selector, ss_dpl, cpl;
629 int has_error_code, new_stack, shift;
630 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
631 uint32_t old_eip, sp_mask;
633 has_error_code = 0;
634 if (!is_int && !is_hw) {
635 switch(intno) {
636 case 8:
637 case 10:
638 case 11:
639 case 12:
640 case 13:
641 case 14:
642 case 17:
643 has_error_code = 1;
644 break;
647 if (is_int)
648 old_eip = next_eip;
649 else
650 old_eip = env->eip;
652 dt = &env->idt;
653 if (intno * 8 + 7 > dt->limit)
654 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
655 ptr = dt->base + intno * 8;
656 e1 = ldl_kernel(ptr);
657 e2 = ldl_kernel(ptr + 4);
658 /* check gate type */
659 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
660 switch(type) {
661 case 5: /* task gate */
662 /* must do that check here to return the correct error code */
663 if (!(e2 & DESC_P_MASK))
664 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
665 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
666 if (has_error_code) {
667 int type;
668 uint32_t mask;
669 /* push the error code */
670 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
671 shift = type >> 3;
672 if (env->segs[R_SS].flags & DESC_B_MASK)
673 mask = 0xffffffff;
674 else
675 mask = 0xffff;
676 esp = (ESP - (2 << shift)) & mask;
677 ssp = env->segs[R_SS].base + esp;
678 if (shift)
679 stl_kernel(ssp, error_code);
680 else
681 stw_kernel(ssp, error_code);
682 SET_ESP(esp, mask);
684 return;
685 case 6: /* 286 interrupt gate */
686 case 7: /* 286 trap gate */
687 case 14: /* 386 interrupt gate */
688 case 15: /* 386 trap gate */
689 break;
690 default:
691 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
692 break;
694 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
695 cpl = env->hflags & HF_CPL_MASK;
696 /* check privilege if software int */
697 if (is_int && dpl < cpl)
698 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
699 /* check valid bit */
700 if (!(e2 & DESC_P_MASK))
701 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
702 selector = e1 >> 16;
703 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
704 if ((selector & 0xfffc) == 0)
705 raise_exception_err(EXCP0D_GPF, 0);
707 if (load_segment(&e1, &e2, selector) != 0)
708 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
709 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
710 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
711 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
712 if (dpl > cpl)
713 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
714 if (!(e2 & DESC_P_MASK))
715 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
716 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
717 /* to inner privilege */
718 get_ss_esp_from_tss(&ss, &esp, dpl);
719 if ((ss & 0xfffc) == 0)
720 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721 if ((ss & 3) != dpl)
722 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
723 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
724 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
725 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
726 if (ss_dpl != dpl)
727 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
728 if (!(ss_e2 & DESC_S_MASK) ||
729 (ss_e2 & DESC_CS_MASK) ||
730 !(ss_e2 & DESC_W_MASK))
731 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
732 if (!(ss_e2 & DESC_P_MASK))
733 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
734 new_stack = 1;
735 sp_mask = get_sp_mask(ss_e2);
736 ssp = get_seg_base(ss_e1, ss_e2);
737 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
738 /* to same privilege */
739 if (env->eflags & VM_MASK)
740 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741 new_stack = 0;
742 sp_mask = get_sp_mask(env->segs[R_SS].flags);
743 ssp = env->segs[R_SS].base;
744 esp = ESP;
745 dpl = cpl;
746 } else {
747 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
748 new_stack = 0; /* avoid warning */
749 sp_mask = 0; /* avoid warning */
750 ssp = 0; /* avoid warning */
751 esp = 0; /* avoid warning */
754 shift = type >> 3;
756 #if 0
757 /* XXX: check that enough room is available */
758 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
759 if (env->eflags & VM_MASK)
760 push_size += 8;
761 push_size <<= shift;
762 #endif
763 if (shift == 1) {
764 if (new_stack) {
765 if (env->eflags & VM_MASK) {
766 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
767 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
768 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
769 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
771 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
772 PUSHL(ssp, esp, sp_mask, ESP);
774 PUSHL(ssp, esp, sp_mask, compute_eflags());
775 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
776 PUSHL(ssp, esp, sp_mask, old_eip);
777 if (has_error_code) {
778 PUSHL(ssp, esp, sp_mask, error_code);
780 } else {
781 if (new_stack) {
782 if (env->eflags & VM_MASK) {
783 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
784 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
785 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
786 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
788 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
789 PUSHW(ssp, esp, sp_mask, ESP);
791 PUSHW(ssp, esp, sp_mask, compute_eflags());
792 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
793 PUSHW(ssp, esp, sp_mask, old_eip);
794 if (has_error_code) {
795 PUSHW(ssp, esp, sp_mask, error_code);
799 if (new_stack) {
800 if (env->eflags & VM_MASK) {
801 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
802 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
803 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
804 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
806 ss = (ss & ~3) | dpl;
807 cpu_x86_load_seg_cache(env, R_SS, ss,
808 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
810 SET_ESP(esp, sp_mask);
812 selector = (selector & ~3) | dpl;
813 cpu_x86_load_seg_cache(env, R_CS, selector,
814 get_seg_base(e1, e2),
815 get_seg_limit(e1, e2),
816 e2);
817 cpu_x86_set_cpl(env, dpl);
818 env->eip = offset;
820 /* interrupt gates clear the IF flag */
821 if ((type & 1) == 0) {
822 env->eflags &= ~IF_MASK;
824 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
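/*
 * Summary of the frame built above for a 32-bit gate: GS/FS/DS/ES only when
 * leaving vm86, SS and ESP only when switching to an inner privilege level,
 * then EFLAGS, CS, EIP and finally the error code when the exception
 * defines one.  Interrupt gates (types 6 and 14) clear IF; trap gates
 * (types 7 and 15) leave it set.
 */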
827 #ifdef TARGET_X86_64
829 #define PUSHQ(sp, val)\
831 sp -= 8;\
832 stq_kernel(sp, (val));\
835 #define POPQ(sp, val)\
837 val = ldq_kernel(sp);\
838 sp += 8;\
841 static inline target_ulong get_rsp_from_tss(int level)
843 int index;
845 #if 0
846 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
847 env->tr.base, env->tr.limit);
848 #endif
850 if (!(env->tr.flags & DESC_P_MASK))
851 cpu_abort(env, "invalid tss");
852 index = 8 * level + 4;
853 if ((index + 7) > env->tr.limit)
854 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
855 return ldq_kernel(env->tr.base + index);
858 /* 64 bit interrupt */
859 static void do_interrupt64(int intno, int is_int, int error_code,
860 target_ulong next_eip, int is_hw)
862 SegmentCache *dt;
863 target_ulong ptr;
864 int type, dpl, selector, cpl, ist;
865 int has_error_code, new_stack;
866 uint32_t e1, e2, e3, ss;
867 target_ulong old_eip, esp, offset;
869 has_error_code = 0;
870 if (!is_int && !is_hw) {
871 switch(intno) {
872 case 8:
873 case 10:
874 case 11:
875 case 12:
876 case 13:
877 case 14:
878 case 17:
879 has_error_code = 1;
880 break;
883 if (is_int)
884 old_eip = next_eip;
885 else
886 old_eip = env->eip;
888 dt = &env->idt;
889 if (intno * 16 + 15 > dt->limit)
890 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
891 ptr = dt->base + intno * 16;
892 e1 = ldl_kernel(ptr);
893 e2 = ldl_kernel(ptr + 4);
894 e3 = ldl_kernel(ptr + 8);
895 /* check gate type */
896 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
897 switch(type) {
898 case 14: /* 386 interrupt gate */
899 case 15: /* 386 trap gate */
900 break;
901 default:
902 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
903 break;
905 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
906 cpl = env->hflags & HF_CPL_MASK;
907 /* check privilege if software int */
908 if (is_int && dpl < cpl)
909 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
910 /* check valid bit */
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
913 selector = e1 >> 16;
914 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
915 ist = e2 & 7;
916 if ((selector & 0xfffc) == 0)
917 raise_exception_err(EXCP0D_GPF, 0);
919 if (load_segment(&e1, &e2, selector) != 0)
920 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
921 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
924 if (dpl > cpl)
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 if (!(e2 & DESC_P_MASK))
927 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
928 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
930 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
931 /* to inner privilege */
932 if (ist != 0)
933 esp = get_rsp_from_tss(ist + 3);
934 else
935 esp = get_rsp_from_tss(dpl);
936 esp &= ~0xfLL; /* align stack */
937 ss = 0;
938 new_stack = 1;
939 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
940 /* to same privilege */
941 if (env->eflags & VM_MASK)
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 new_stack = 0;
944 if (ist != 0)
945 esp = get_rsp_from_tss(ist + 3);
946 else
947 esp = ESP;
948 esp &= ~0xfLL; /* align stack */
949 dpl = cpl;
950 } else {
951 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
952 new_stack = 0; /* avoid warning */
953 esp = 0; /* avoid warning */
956 PUSHQ(esp, env->segs[R_SS].selector);
957 PUSHQ(esp, ESP);
958 PUSHQ(esp, compute_eflags());
959 PUSHQ(esp, env->segs[R_CS].selector);
960 PUSHQ(esp, old_eip);
961 if (has_error_code) {
962 PUSHQ(esp, error_code);
965 if (new_stack) {
966 ss = 0 | dpl;
967 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
969 ESP = esp;
971 selector = (selector & ~3) | dpl;
972 cpu_x86_load_seg_cache(env, R_CS, selector,
973 get_seg_base(e1, e2),
974 get_seg_limit(e1, e2),
975 e2);
976 cpu_x86_set_cpl(env, dpl);
977 env->eip = offset;
979 /* interrupt gates clear the IF flag */
980 if ((type & 1) == 0) {
981 env->eflags &= ~IF_MASK;
983 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
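/*
 * In long mode each IDT entry is 16 bytes (hence the intno * 16 indexing)
 * and carries a 3-bit IST field; a non-zero IST forces a stack switch to
 * the corresponding TSS stack even when the privilege level does not
 * change, which is why the IST case is folded into the inner-privilege
 * branch above.
 */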
985 #endif
987 #if defined(CONFIG_USER_ONLY)
988 void helper_syscall(int next_eip_addend)
990 env->exception_index = EXCP_SYSCALL;
991 env->exception_next_eip = env->eip + next_eip_addend;
992 cpu_loop_exit();
994 #else
995 void helper_syscall(int next_eip_addend)
997 int selector;
999 if (!(env->efer & MSR_EFER_SCE)) {
1000 raise_exception_err(EXCP06_ILLOP, 0);
1002 selector = (env->star >> 32) & 0xffff;
1003 #ifdef TARGET_X86_64
1004 if (env->hflags & HF_LMA_MASK) {
1005 int code64;
1007 ECX = env->eip + next_eip_addend;
1008 env->regs[11] = compute_eflags();
1010 code64 = env->hflags & HF_CS64_MASK;
1012 cpu_x86_set_cpl(env, 0);
1013 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1014 0, 0xffffffff,
1015 DESC_G_MASK | DESC_P_MASK |
1016 DESC_S_MASK |
1017 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1018 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1019 0, 0xffffffff,
1020 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1021 DESC_S_MASK |
1022 DESC_W_MASK | DESC_A_MASK);
1023 env->eflags &= ~env->fmask;
1024 load_eflags(env->eflags, 0);
1025 if (code64)
1026 env->eip = env->lstar;
1027 else
1028 env->eip = env->cstar;
1029 } else
1030 #endif
1032 ECX = (uint32_t)(env->eip + next_eip_addend);
1034 cpu_x86_set_cpl(env, 0);
1035 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1036 0, 0xffffffff,
1037 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1038 DESC_S_MASK |
1039 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1040 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1041 0, 0xffffffff,
1042 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1043 DESC_S_MASK |
1044 DESC_W_MASK | DESC_A_MASK);
1045 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1046 env->eip = (uint32_t)env->star;
1049 #endif
1051 void helper_sysret(int dflag)
1053 int cpl, selector;
1055 if (!(env->efer & MSR_EFER_SCE)) {
1056 raise_exception_err(EXCP06_ILLOP, 0);
1058 cpl = env->hflags & HF_CPL_MASK;
1059 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1060 raise_exception_err(EXCP0D_GPF, 0);
1062 selector = (env->star >> 48) & 0xffff;
1063 #ifdef TARGET_X86_64
1064 if (env->hflags & HF_LMA_MASK) {
1065 if (dflag == 2) {
1066 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1067 0, 0xffffffff,
1068 DESC_G_MASK | DESC_P_MASK |
1069 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1070 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1071 DESC_L_MASK);
1072 env->eip = ECX;
1073 } else {
1074 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1075 0, 0xffffffff,
1076 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1077 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1078 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1079 env->eip = (uint32_t)ECX;
1081 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1082 0, 0xffffffff,
1083 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1084 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1085 DESC_W_MASK | DESC_A_MASK);
1086 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1087 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1088 cpu_x86_set_cpl(env, 3);
1089 } else
1090 #endif
1092 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1093 0, 0xffffffff,
1094 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1095 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1096 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1097 env->eip = (uint32_t)ECX;
1098 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1099 0, 0xffffffff,
1100 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1101 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1102 DESC_W_MASK | DESC_A_MASK);
1103 env->eflags |= IF_MASK;
1104 cpu_x86_set_cpl(env, 3);
1106 #ifdef USE_KQEMU
1107 if (kqemu_is_ok(env)) {
1108 if (env->hflags & HF_LMA_MASK)
1109 CC_OP = CC_OP_EFLAGS;
1110 env->exception_index = -1;
1111 cpu_loop_exit();
1113 #endif
1116 /* real mode interrupt */
1117 static void do_interrupt_real(int intno, int is_int, int error_code,
1118 unsigned int next_eip)
1120 SegmentCache *dt;
1121 target_ulong ptr, ssp;
1122 int selector;
1123 uint32_t offset, esp;
1124 uint32_t old_cs, old_eip;
1126 /* real mode (simpler !) */
1127 dt = &env->idt;
1128 if (intno * 4 + 3 > dt->limit)
1129 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1130 ptr = dt->base + intno * 4;
1131 offset = lduw_kernel(ptr);
1132 selector = lduw_kernel(ptr + 2);
1133 esp = ESP;
1134 ssp = env->segs[R_SS].base;
1135 if (is_int)
1136 old_eip = next_eip;
1137 else
1138 old_eip = env->eip;
1139 old_cs = env->segs[R_CS].selector;
1140 /* XXX: use SS segment size ? */
1141 PUSHW(ssp, esp, 0xffff, compute_eflags());
1142 PUSHW(ssp, esp, 0xffff, old_cs);
1143 PUSHW(ssp, esp, 0xffff, old_eip);
1145 /* update processor state */
1146 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1147 env->eip = offset;
1148 env->segs[R_CS].selector = selector;
1149 env->segs[R_CS].base = (selector << 4);
1150 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
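/*
 * Real-mode entry reads a 4-byte IVT entry (16-bit offset, then selector)
 * and pushes FLAGS, CS and IP.  Note that CS is updated by writing the
 * selector and base directly instead of going through load_seg_vm(), which
 * keeps the cached CS flags intact; this is the same approach the commit
 * subject asks for in the real-mode iret path.
 */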
1153 /* fake user mode interrupt */
1154 void do_interrupt_user(int intno, int is_int, int error_code,
1155 target_ulong next_eip)
1157 SegmentCache *dt;
1158 target_ulong ptr;
1159 int dpl, cpl, shift;
1160 uint32_t e2;
1162 dt = &env->idt;
1163 if (env->hflags & HF_LMA_MASK) {
1164 shift = 4;
1165 } else {
1166 shift = 3;
1168 ptr = dt->base + (intno << shift);
1169 e2 = ldl_kernel(ptr + 4);
1171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1172 cpl = env->hflags & HF_CPL_MASK;
1173 /* check privilege if software int */
1174 if (is_int && dpl < cpl)
1175 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1177 /* Since we emulate only user space, we cannot do more than
1178 exiting the emulation with the suitable exception and error
1179 code */
1180 if (is_int)
1181 EIP = next_eip;
1185 * Begin execution of an interrupt. is_int is TRUE if coming from
1186 * the int instruction. next_eip is the EIP value AFTER the interrupt
1187 * instruction. It is only relevant if is_int is TRUE.
1189 void do_interrupt(int intno, int is_int, int error_code,
1190 target_ulong next_eip, int is_hw)
1192 if (loglevel & CPU_LOG_INT) {
1193 if ((env->cr[0] & CR0_PE_MASK)) {
1194 static int count;
1195 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1196 count, intno, error_code, is_int,
1197 env->hflags & HF_CPL_MASK,
1198 env->segs[R_CS].selector, EIP,
1199 (int)env->segs[R_CS].base + EIP,
1200 env->segs[R_SS].selector, ESP);
1201 if (intno == 0x0e) {
1202 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1203 } else {
1204 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1206 fprintf(logfile, "\n");
1207 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1208 #if 0
1210 int i;
1211 uint8_t *ptr;
1212 fprintf(logfile, " code=");
1213 ptr = env->segs[R_CS].base + env->eip;
1214 for(i = 0; i < 16; i++) {
1215 fprintf(logfile, " %02x", ldub(ptr + i));
1217 fprintf(logfile, "\n");
1219 #endif
1220 count++;
1223 if (env->cr[0] & CR0_PE_MASK) {
1224 #ifdef TARGET_X86_64
1225 if (env->hflags & HF_LMA_MASK) {
1226 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1227 } else
1228 #endif
1230 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1232 } else {
1233 do_interrupt_real(intno, is_int, error_code, next_eip);
1238 * Check nested exceptions and change to double or triple fault if
1239 * needed. It should only be called if this is not an interrupt.
1240 * Returns the new exception number.
1242 static int check_exception(int intno, int *error_code)
1244 int first_contributory = env->old_exception == 0 ||
1245 (env->old_exception >= 10 &&
1246 env->old_exception <= 13);
1247 int second_contributory = intno == 0 ||
1248 (intno >= 10 && intno <= 13);
1250 if (loglevel & CPU_LOG_INT)
1251 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1252 env->old_exception, intno);
1254 if (env->old_exception == EXCP08_DBLE)
1255 cpu_abort(env, "triple fault");
1257 if ((first_contributory && second_contributory)
1258 || (env->old_exception == EXCP0E_PAGE &&
1259 (second_contributory || (intno == EXCP0E_PAGE)))) {
1260 intno = EXCP08_DBLE;
1261 *error_code = 0;
1264 if (second_contributory || (intno == EXCP0E_PAGE) ||
1265 (intno == EXCP08_DBLE))
1266 env->old_exception = intno;
1268 return intno;
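/*
 * The rules above follow the IA-32 double-fault table: a contributory
 * exception (vectors 0 and 10-13) following another contributory one, or
 * most faults following a page fault, are promoted to #DF (vector 8), and
 * a further fault while #DF is pending aborts with a triple fault.
 */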
1272 * Signal an interrupt. It is executed in the main CPU loop.
1273 * is_int is TRUE if coming from the int instruction. next_eip is the
1274 * EIP value AFTER the interrupt instruction. It is only relevant if
1275 * is_int is TRUE.
1277 void raise_interrupt(int intno, int is_int, int error_code,
1278 int next_eip_addend)
1280 if (!is_int) {
1281 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1282 intno = check_exception(intno, &error_code);
1283 } else {
1284 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1287 env->exception_index = intno;
1288 env->error_code = error_code;
1289 env->exception_is_int = is_int;
1290 env->exception_next_eip = env->eip + next_eip_addend;
1291 cpu_loop_exit();
1294 /* shortcuts to generate exceptions */
1296 void (raise_exception_err)(int exception_index, int error_code)
1298 raise_interrupt(exception_index, 0, error_code, 0);
1301 void raise_exception(int exception_index)
1303 raise_interrupt(exception_index, 0, 0, 0);
1306 /* SMM support */
1308 #if defined(CONFIG_USER_ONLY)
1310 void do_smm_enter(void)
1314 void helper_rsm(void)
1318 #else
1320 #ifdef TARGET_X86_64
1321 #define SMM_REVISION_ID 0x00020064
1322 #else
1323 #define SMM_REVISION_ID 0x00020000
1324 #endif
1326 void do_smm_enter(void)
1328 target_ulong sm_state;
1329 SegmentCache *dt;
1330 int i, offset;
1332 if (loglevel & CPU_LOG_INT) {
1333 fprintf(logfile, "SMM: enter\n");
1334 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1337 env->hflags |= HF_SMM_MASK;
1338 cpu_smm_update(env);
1340 sm_state = env->smbase + 0x8000;
1342 #ifdef TARGET_X86_64
1343 for(i = 0; i < 6; i++) {
1344 dt = &env->segs[i];
1345 offset = 0x7e00 + i * 16;
1346 stw_phys(sm_state + offset, dt->selector);
1347 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1348 stl_phys(sm_state + offset + 4, dt->limit);
1349 stq_phys(sm_state + offset + 8, dt->base);
1352 stq_phys(sm_state + 0x7e68, env->gdt.base);
1353 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1355 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1356 stq_phys(sm_state + 0x7e78, env->ldt.base);
1357 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1358 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1360 stq_phys(sm_state + 0x7e88, env->idt.base);
1361 stl_phys(sm_state + 0x7e84, env->idt.limit);
1363 stw_phys(sm_state + 0x7e90, env->tr.selector);
1364 stq_phys(sm_state + 0x7e98, env->tr.base);
1365 stl_phys(sm_state + 0x7e94, env->tr.limit);
1366 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1368 stq_phys(sm_state + 0x7ed0, env->efer);
1370 stq_phys(sm_state + 0x7ff8, EAX);
1371 stq_phys(sm_state + 0x7ff0, ECX);
1372 stq_phys(sm_state + 0x7fe8, EDX);
1373 stq_phys(sm_state + 0x7fe0, EBX);
1374 stq_phys(sm_state + 0x7fd8, ESP);
1375 stq_phys(sm_state + 0x7fd0, EBP);
1376 stq_phys(sm_state + 0x7fc8, ESI);
1377 stq_phys(sm_state + 0x7fc0, EDI);
1378 for(i = 8; i < 16; i++)
1379 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1380 stq_phys(sm_state + 0x7f78, env->eip);
1381 stl_phys(sm_state + 0x7f70, compute_eflags());
1382 stl_phys(sm_state + 0x7f68, env->dr[6]);
1383 stl_phys(sm_state + 0x7f60, env->dr[7]);
1385 stl_phys(sm_state + 0x7f48, env->cr[4]);
1386 stl_phys(sm_state + 0x7f50, env->cr[3]);
1387 stl_phys(sm_state + 0x7f58, env->cr[0]);
1389 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1390 stl_phys(sm_state + 0x7f00, env->smbase);
1391 #else
1392 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1393 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1394 stl_phys(sm_state + 0x7ff4, compute_eflags());
1395 stl_phys(sm_state + 0x7ff0, env->eip);
1396 stl_phys(sm_state + 0x7fec, EDI);
1397 stl_phys(sm_state + 0x7fe8, ESI);
1398 stl_phys(sm_state + 0x7fe4, EBP);
1399 stl_phys(sm_state + 0x7fe0, ESP);
1400 stl_phys(sm_state + 0x7fdc, EBX);
1401 stl_phys(sm_state + 0x7fd8, EDX);
1402 stl_phys(sm_state + 0x7fd4, ECX);
1403 stl_phys(sm_state + 0x7fd0, EAX);
1404 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1405 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1407 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1408 stl_phys(sm_state + 0x7f64, env->tr.base);
1409 stl_phys(sm_state + 0x7f60, env->tr.limit);
1410 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1412 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1413 stl_phys(sm_state + 0x7f80, env->ldt.base);
1414 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1415 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1417 stl_phys(sm_state + 0x7f74, env->gdt.base);
1418 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1420 stl_phys(sm_state + 0x7f58, env->idt.base);
1421 stl_phys(sm_state + 0x7f54, env->idt.limit);
1423 for(i = 0; i < 6; i++) {
1424 dt = &env->segs[i];
1425 if (i < 3)
1426 offset = 0x7f84 + i * 12;
1427 else
1428 offset = 0x7f2c + (i - 3) * 12;
1429 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1430 stl_phys(sm_state + offset + 8, dt->base);
1431 stl_phys(sm_state + offset + 4, dt->limit);
1432 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1434 stl_phys(sm_state + 0x7f14, env->cr[4]);
1436 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1437 stl_phys(sm_state + 0x7ef8, env->smbase);
1438 #endif
1439 /* init SMM cpu state */
1441 #ifdef TARGET_X86_64
1442 cpu_load_efer(env, 0);
1443 #endif
1444 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1445 env->eip = 0x00008000;
1446 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1447 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1449 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1450 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1451 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1452 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1454 cpu_x86_update_cr0(env,
1455 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1456 cpu_x86_update_cr4(env, 0);
1457 env->dr[7] = 0x00000400;
1458 CC_OP = CC_OP_EFLAGS;
1461 void helper_rsm(void)
1463 target_ulong sm_state;
1464 int i, offset;
1465 uint32_t val;
1467 sm_state = env->smbase + 0x8000;
1468 #ifdef TARGET_X86_64
1469 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1471 for(i = 0; i < 6; i++) {
1472 offset = 0x7e00 + i * 16;
1473 cpu_x86_load_seg_cache(env, i,
1474 lduw_phys(sm_state + offset),
1475 ldq_phys(sm_state + offset + 8),
1476 ldl_phys(sm_state + offset + 4),
1477 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1480 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1481 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1483 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1484 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1485 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1486 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1488 env->idt.base = ldq_phys(sm_state + 0x7e88);
1489 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1491 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1492 env->tr.base = ldq_phys(sm_state + 0x7e98);
1493 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1494 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1496 EAX = ldq_phys(sm_state + 0x7ff8);
1497 ECX = ldq_phys(sm_state + 0x7ff0);
1498 EDX = ldq_phys(sm_state + 0x7fe8);
1499 EBX = ldq_phys(sm_state + 0x7fe0);
1500 ESP = ldq_phys(sm_state + 0x7fd8);
1501 EBP = ldq_phys(sm_state + 0x7fd0);
1502 ESI = ldq_phys(sm_state + 0x7fc8);
1503 EDI = ldq_phys(sm_state + 0x7fc0);
1504 for(i = 8; i < 16; i++)
1505 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1506 env->eip = ldq_phys(sm_state + 0x7f78);
1507 load_eflags(ldl_phys(sm_state + 0x7f70),
1508 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1509 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1510 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1512 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1513 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1514 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1516 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1517 if (val & 0x20000) {
1518 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1520 #else
1521 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1522 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1523 load_eflags(ldl_phys(sm_state + 0x7ff4),
1524 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1525 env->eip = ldl_phys(sm_state + 0x7ff0);
1526 EDI = ldl_phys(sm_state + 0x7fec);
1527 ESI = ldl_phys(sm_state + 0x7fe8);
1528 EBP = ldl_phys(sm_state + 0x7fe4);
1529 ESP = ldl_phys(sm_state + 0x7fe0);
1530 EBX = ldl_phys(sm_state + 0x7fdc);
1531 EDX = ldl_phys(sm_state + 0x7fd8);
1532 ECX = ldl_phys(sm_state + 0x7fd4);
1533 EAX = ldl_phys(sm_state + 0x7fd0);
1534 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1535 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1537 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1538 env->tr.base = ldl_phys(sm_state + 0x7f64);
1539 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1540 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1542 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1543 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1544 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1545 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1547 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1548 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1550 env->idt.base = ldl_phys(sm_state + 0x7f58);
1551 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1553 for(i = 0; i < 6; i++) {
1554 if (i < 3)
1555 offset = 0x7f84 + i * 12;
1556 else
1557 offset = 0x7f2c + (i - 3) * 12;
1558 cpu_x86_load_seg_cache(env, i,
1559 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1560 ldl_phys(sm_state + offset + 8),
1561 ldl_phys(sm_state + offset + 4),
1562 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1564 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1566 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1567 if (val & 0x20000) {
1568 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1570 #endif
1571 CC_OP = CC_OP_EFLAGS;
1572 env->hflags &= ~HF_SMM_MASK;
1573 cpu_smm_update(env);
1575 if (loglevel & CPU_LOG_INT) {
1576 fprintf(logfile, "SMM: after RSM\n");
1577 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1581 #endif /* !CONFIG_USER_ONLY */
1584 /* division, flags are undefined */
1586 void helper_divb_AL(target_ulong t0)
1588 unsigned int num, den, q, r;
1590 num = (EAX & 0xffff);
1591 den = (t0 & 0xff);
1592 if (den == 0) {
1593 raise_exception(EXCP00_DIVZ);
1595 q = (num / den);
1596 if (q > 0xff)
1597 raise_exception(EXCP00_DIVZ);
1598 q &= 0xff;
1599 r = (num % den) & 0xff;
1600 EAX = (EAX & ~0xffff) | (r << 8) | q;
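/*
 * As on real hardware, both a zero divisor and a quotient that does not
 * fit in the destination raise #DE (EXCP00_DIVZ); the helpers below follow
 * the same pattern for the 16-bit, 32-bit and signed variants.
 */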
1603 void helper_idivb_AL(target_ulong t0)
1605 int num, den, q, r;
1607 num = (int16_t)EAX;
1608 den = (int8_t)t0;
1609 if (den == 0) {
1610 raise_exception(EXCP00_DIVZ);
1612 q = (num / den);
1613 if (q != (int8_t)q)
1614 raise_exception(EXCP00_DIVZ);
1615 q &= 0xff;
1616 r = (num % den) & 0xff;
1617 EAX = (EAX & ~0xffff) | (r << 8) | q;
1620 void helper_divw_AX(target_ulong t0)
1622 unsigned int num, den, q, r;
1624 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1625 den = (t0 & 0xffff);
1626 if (den == 0) {
1627 raise_exception(EXCP00_DIVZ);
1629 q = (num / den);
1630 if (q > 0xffff)
1631 raise_exception(EXCP00_DIVZ);
1632 q &= 0xffff;
1633 r = (num % den) & 0xffff;
1634 EAX = (EAX & ~0xffff) | q;
1635 EDX = (EDX & ~0xffff) | r;
1638 void helper_idivw_AX(target_ulong t0)
1640 int num, den, q, r;
1642 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1643 den = (int16_t)t0;
1644 if (den == 0) {
1645 raise_exception(EXCP00_DIVZ);
1647 q = (num / den);
1648 if (q != (int16_t)q)
1649 raise_exception(EXCP00_DIVZ);
1650 q &= 0xffff;
1651 r = (num % den) & 0xffff;
1652 EAX = (EAX & ~0xffff) | q;
1653 EDX = (EDX & ~0xffff) | r;
1656 void helper_divl_EAX(target_ulong t0)
1658 unsigned int den, r;
1659 uint64_t num, q;
1661 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1662 den = t0;
1663 if (den == 0) {
1664 raise_exception(EXCP00_DIVZ);
1666 q = (num / den);
1667 r = (num % den);
1668 if (q > 0xffffffff)
1669 raise_exception(EXCP00_DIVZ);
1670 EAX = (uint32_t)q;
1671 EDX = (uint32_t)r;
1674 void helper_idivl_EAX(target_ulong t0)
1676 int den, r;
1677 int64_t num, q;
1679 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1680 den = t0;
1681 if (den == 0) {
1682 raise_exception(EXCP00_DIVZ);
1684 q = (num / den);
1685 r = (num % den);
1686 if (q != (int32_t)q)
1687 raise_exception(EXCP00_DIVZ);
1688 EAX = (uint32_t)q;
1689 EDX = (uint32_t)r;
1692 /* bcd */
1694 /* XXX: exception */
1695 void helper_aam(int base)
1697 int al, ah;
1698 al = EAX & 0xff;
1699 ah = al / base;
1700 al = al % base;
1701 EAX = (EAX & ~0xffff) | al | (ah << 8);
1702 CC_DST = al;
1705 void helper_aad(int base)
1707 int al, ah;
1708 al = EAX & 0xff;
1709 ah = (EAX >> 8) & 0xff;
1710 al = ((ah * base) + al) & 0xff;
1711 EAX = (EAX & ~0xffff) | al;
1712 CC_DST = al;
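/*
 * AAM and AAD take an immediate base operand (10 in the usual encoding).
 * The "XXX: exception" note before helper_aam() refers to the missing #DE
 * that real hardware raises when AAM is executed with a base of zero.
 */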
1715 void helper_aaa(void)
1717 int icarry;
1718 int al, ah, af;
1719 int eflags;
1721 eflags = cc_table[CC_OP].compute_all();
1722 af = eflags & CC_A;
1723 al = EAX & 0xff;
1724 ah = (EAX >> 8) & 0xff;
1726 icarry = (al > 0xf9);
1727 if (((al & 0x0f) > 9 ) || af) {
1728 al = (al + 6) & 0x0f;
1729 ah = (ah + 1 + icarry) & 0xff;
1730 eflags |= CC_C | CC_A;
1731 } else {
1732 eflags &= ~(CC_C | CC_A);
1733 al &= 0x0f;
1735 EAX = (EAX & ~0xffff) | al | (ah << 8);
1736 CC_SRC = eflags;
1737 FORCE_RET();
1740 void helper_aas(void)
1742 int icarry;
1743 int al, ah, af;
1744 int eflags;
1746 eflags = cc_table[CC_OP].compute_all();
1747 af = eflags & CC_A;
1748 al = EAX & 0xff;
1749 ah = (EAX >> 8) & 0xff;
1751 icarry = (al < 6);
1752 if (((al & 0x0f) > 9 ) || af) {
1753 al = (al - 6) & 0x0f;
1754 ah = (ah - 1 - icarry) & 0xff;
1755 eflags |= CC_C | CC_A;
1756 } else {
1757 eflags &= ~(CC_C | CC_A);
1758 al &= 0x0f;
1760 EAX = (EAX & ~0xffff) | al | (ah << 8);
1761 CC_SRC = eflags;
1762 FORCE_RET();
1765 void helper_daa(void)
1767 int al, af, cf;
1768 int eflags;
1770 eflags = cc_table[CC_OP].compute_all();
1771 cf = eflags & CC_C;
1772 af = eflags & CC_A;
1773 al = EAX & 0xff;
1775 eflags = 0;
1776 if (((al & 0x0f) > 9 ) || af) {
1777 al = (al + 6) & 0xff;
1778 eflags |= CC_A;
1780 if ((al > 0x9f) || cf) {
1781 al = (al + 0x60) & 0xff;
1782 eflags |= CC_C;
1784 EAX = (EAX & ~0xff) | al;
1785 /* well, speed is not an issue here, so we compute the flags by hand */
1786 eflags |= (al == 0) << 6; /* zf */
1787 eflags |= parity_table[al]; /* pf */
1788 eflags |= (al & 0x80); /* sf */
1789 CC_SRC = eflags;
1790 FORCE_RET();
1793 void helper_das(void)
1795 int al, al1, af, cf;
1796 int eflags;
1798 eflags = cc_table[CC_OP].compute_all();
1799 cf = eflags & CC_C;
1800 af = eflags & CC_A;
1801 al = EAX & 0xff;
1803 eflags = 0;
1804 al1 = al;
1805 if (((al & 0x0f) > 9 ) || af) {
1806 eflags |= CC_A;
1807 if (al < 6 || cf)
1808 eflags |= CC_C;
1809 al = (al - 6) & 0xff;
1811 if ((al1 > 0x99) || cf) {
1812 al = (al - 0x60) & 0xff;
1813 eflags |= CC_C;
1815 EAX = (EAX & ~0xff) | al;
1816 /* well, speed is not an issue here, so we compute the flags by hand */
1817 eflags |= (al == 0) << 6; /* zf */
1818 eflags |= parity_table[al]; /* pf */
1819 eflags |= (al & 0x80); /* sf */
1820 CC_SRC = eflags;
1821 FORCE_RET();
1824 void helper_into(int next_eip_addend)
1826 int eflags;
1827 eflags = cc_table[CC_OP].compute_all();
1828 if (eflags & CC_O) {
1829 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1833 void helper_cmpxchg8b(target_ulong a0)
1835 uint64_t d;
1836 int eflags;
1838 eflags = cc_table[CC_OP].compute_all();
1839 d = ldq(a0);
1840 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1841 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1842 eflags |= CC_Z;
1843 } else {
1844 /* always do the store */
1845 stq(a0, d);
1846 EDX = (uint32_t)(d >> 32);
1847 EAX = (uint32_t)d;
1848 eflags &= ~CC_Z;
1850 CC_SRC = eflags;
1853 #ifdef TARGET_X86_64
1854 void helper_cmpxchg16b(target_ulong a0)
1856 uint64_t d0, d1;
1857 int eflags;
1859 if ((a0 & 0xf) != 0)
1860 raise_exception(EXCP0D_GPF);
1861 eflags = cc_table[CC_OP].compute_all();
1862 d0 = ldq(a0);
1863 d1 = ldq(a0 + 8);
1864 if (d0 == EAX && d1 == EDX) {
1865 stq(a0, EBX);
1866 stq(a0 + 8, ECX);
1867 eflags |= CC_Z;
1868 } else {
1869 /* always do the store */
1870 stq(a0, d0);
1871 stq(a0 + 8, d1);
1872 EDX = d1;
1873 EAX = d0;
1874 eflags &= ~CC_Z;
1876 CC_SRC = eflags;
1878 #endif
1880 void helper_single_step(void)
1882 env->dr[6] |= 0x4000;
1883 raise_exception(EXCP01_SSTP);
1886 void helper_cpuid(void)
1888 uint32_t index;
1890 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1892 index = (uint32_t)EAX;
1893 /* test if maximum index reached */
1894 if (index & 0x80000000) {
1895 if (index > env->cpuid_xlevel)
1896 index = env->cpuid_level;
1897 } else {
1898 if (index > env->cpuid_level)
1899 index = env->cpuid_level;
1902 switch(index) {
1903 case 0:
1904 EAX = env->cpuid_level;
1905 EBX = env->cpuid_vendor1;
1906 EDX = env->cpuid_vendor2;
1907 ECX = env->cpuid_vendor3;
1908 break;
1909 case 1:
1910 EAX = env->cpuid_version;
1911 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1912 ECX = env->cpuid_ext_features;
1913 EDX = env->cpuid_features;
1914 break;
1915 case 2:
1916 /* cache info: needed for Pentium Pro compatibility */
1917 EAX = 1;
1918 EBX = 0;
1919 ECX = 0;
1920 EDX = 0x2c307d;
1921 break;
1922 case 4:
1923 /* cache info: needed for Core compatibility */
1924 switch (ECX) {
1925 case 0: /* L1 dcache info */
1926 EAX = 0x0000121;
1927 EBX = 0x1c0003f;
1928 ECX = 0x000003f;
1929 EDX = 0x0000001;
1930 break;
1931 case 1: /* L1 icache info */
1932 EAX = 0x0000122;
1933 EBX = 0x1c0003f;
1934 ECX = 0x000003f;
1935 EDX = 0x0000001;
1936 break;
1937 case 2: /* L2 cache info */
1938 EAX = 0x0000143;
1939 EBX = 0x3c0003f;
1940 ECX = 0x0000fff;
1941 EDX = 0x0000001;
1942 break;
1943 default: /* end of info */
1944 EAX = 0;
1945 EBX = 0;
1946 ECX = 0;
1947 EDX = 0;
1948 break;
1951 break;
1952 case 5:
1953 /* mwait info: needed for Core compatibility */
1954 EAX = 0; /* Smallest monitor-line size in bytes */
1955 EBX = 0; /* Largest monitor-line size in bytes */
1956 ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1957 EDX = 0;
1958 break;
1959 case 6:
1960 /* Thermal and Power Leaf */
1961 EAX = 0;
1962 EBX = 0;
1963 ECX = 0;
1964 EDX = 0;
1965 break;
1966 case 9:
1967 /* Direct Cache Access Information Leaf */
1968 EAX = 0; /* Bits 0-31 in DCA_CAP MSR */
1969 EBX = 0;
1970 ECX = 0;
1971 EDX = 0;
1972 break;
1973 case 0xA:
1974 /* Architectural Performance Monitoring Leaf */
1975 EAX = 0;
1976 EBX = 0;
1977 ECX = 0;
1978 EDX = 0;
1979 break;
1980 case 0x80000000:
1981 EAX = env->cpuid_xlevel;
1982 EBX = env->cpuid_vendor1;
1983 EDX = env->cpuid_vendor2;
1984 ECX = env->cpuid_vendor3;
1985 break;
1986 case 0x80000001:
1987 EAX = env->cpuid_features;
1988 EBX = 0;
1989 ECX = env->cpuid_ext3_features;
1990 EDX = env->cpuid_ext2_features;
1991 break;
1992 case 0x80000002:
1993 case 0x80000003:
1994 case 0x80000004:
1995 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1996 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1997 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1998 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1999 break;
2000 case 0x80000005:
2001 /* cache info (L1 cache) */
2002 EAX = 0x01ff01ff;
2003 EBX = 0x01ff01ff;
2004 ECX = 0x40020140;
2005 EDX = 0x40020140;
2006 break;
2007 case 0x80000006:
2008 /* cache info (L2 cache) */
2009 EAX = 0;
2010 EBX = 0x42004200;
2011 ECX = 0x02008140;
2012 EDX = 0;
2013 break;
2014 case 0x80000008:
2015 /* virtual & phys address size in low 2 bytes. */
2016 /* XXX: This value must match the one used in the MMU code. */
2017 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
2018 /* 64 bit processor */
2019 #if defined(USE_KQEMU)
2020 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2021 #else
2022 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2023 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2024 #endif
2025 } else {
2026 #if defined(USE_KQEMU)
2027 EAX = 0x00000020; /* 32 bits physical */
2028 #else
2029 EAX = 0x00000024; /* 36 bits physical */
2030 #endif
2032 EBX = 0;
2033 ECX = 0;
2034 EDX = 0;
2035 break;
2036 case 0x8000000A:
2037 EAX = 0x00000001;
2038 EBX = 0;
2039 ECX = 0;
2040 EDX = 0;
2041 break;
2042 default:
2043 /* reserved values: zero */
2044 EAX = 0;
2045 EBX = 0;
2046 ECX = 0;
2047 EDX = 0;
2048 break;
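/* Helper for ENTER with a non-zero nesting level: the saved frame
   pointers of the enclosing procedures are copied from the old frame to
   the new stack and the current frame pointer (t1) is pushed last.  All
   stack accesses are masked with the SS stack-pointer mask. */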
2052 void helper_enter_level(int level, int data32, target_ulong t1)
2054 target_ulong ssp;
2055 uint32_t esp_mask, esp, ebp;
2057 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2058 ssp = env->segs[R_SS].base;
2059 ebp = EBP;
2060 esp = ESP;
2061 if (data32) {
2062 /* 32 bit */
2063 esp -= 4;
2064 while (--level) {
2065 esp -= 4;
2066 ebp -= 4;
2067 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2069 esp -= 4;
2070 stl(ssp + (esp & esp_mask), t1);
2071 } else {
2072 /* 16 bit */
2073 esp -= 2;
2074 while (--level) {
2075 esp -= 2;
2076 ebp -= 2;
2077 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2079 esp -= 2;
2080 stw(ssp + (esp & esp_mask), t1);
2084 #ifdef TARGET_X86_64
2085 void helper_enter64_level(int level, int data64, target_ulong t1)
2087 target_ulong esp, ebp;
2088 ebp = EBP;
2089 esp = ESP;
2091 if (data64) {
2092 /* 64 bit */
2093 esp -= 8;
2094 while (--level) {
2095 esp -= 8;
2096 ebp -= 8;
2097 stq(esp, ldq(ebp));
2099 esp -= 8;
2100 stq(esp, t1);
2101 } else {
2102 /* 16 bit */
2103 esp -= 2;
2104 while (--level) {
2105 esp -= 2;
2106 ebp -= 2;
2107 stw(esp, lduw(ebp));
2109 esp -= 2;
2110 stw(esp, t1);
2113 #endif
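/* LLDT: load the Local Descriptor Table register from a GDT selector.
   A null selector yields an empty LDT; otherwise the descriptor must be
   a present LDT descriptor (system type 2), and in long mode the
   16-byte descriptor supplies the upper 32 bits of the base. */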
2115 void helper_lldt(int selector)
2117 SegmentCache *dt;
2118 uint32_t e1, e2;
2119 int index, entry_limit;
2120 target_ulong ptr;
2122 selector &= 0xffff;
2123 if ((selector & 0xfffc) == 0) {
2124 /* XXX: NULL selector case: invalid LDT */
2125 env->ldt.base = 0;
2126 env->ldt.limit = 0;
2127 } else {
2128 if (selector & 0x4)
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 dt = &env->gdt;
2131 index = selector & ~7;
2132 #ifdef TARGET_X86_64
2133 if (env->hflags & HF_LMA_MASK)
2134 entry_limit = 15;
2135 else
2136 #endif
2137 entry_limit = 7;
2138 if ((index + entry_limit) > dt->limit)
2139 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2140 ptr = dt->base + index;
2141 e1 = ldl_kernel(ptr);
2142 e2 = ldl_kernel(ptr + 4);
2143 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 if (!(e2 & DESC_P_MASK))
2146 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2147 #ifdef TARGET_X86_64
2148 if (env->hflags & HF_LMA_MASK) {
2149 uint32_t e3;
2150 e3 = ldl_kernel(ptr + 8);
2151 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2152 env->ldt.base |= (target_ulong)e3 << 32;
2153 } else
2154 #endif
2156 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2159 env->ldt.selector = selector;
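/* LTR: load the Task Register.  Same GDT/presence checks as LLDT, but
   the descriptor must be an available TSS (type 1 or 9) and is marked
   busy in the descriptor table once loaded. */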
2162 void helper_ltr(int selector)
2164 SegmentCache *dt;
2165 uint32_t e1, e2;
2166 int index, type, entry_limit;
2167 target_ulong ptr;
2169 selector &= 0xffff;
2170 if ((selector & 0xfffc) == 0) {
2171 /* NULL selector case: invalid TR */
2172 env->tr.base = 0;
2173 env->tr.limit = 0;
2174 env->tr.flags = 0;
2175 } else {
2176 if (selector & 0x4)
2177 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2178 dt = &env->gdt;
2179 index = selector & ~7;
2180 #ifdef TARGET_X86_64
2181 if (env->hflags & HF_LMA_MASK)
2182 entry_limit = 15;
2183 else
2184 #endif
2185 entry_limit = 7;
2186 if ((index + entry_limit) > dt->limit)
2187 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2188 ptr = dt->base + index;
2189 e1 = ldl_kernel(ptr);
2190 e2 = ldl_kernel(ptr + 4);
2191 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2192 if ((e2 & DESC_S_MASK) ||
2193 (type != 1 && type != 9))
2194 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2195 if (!(e2 & DESC_P_MASK))
2196 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2197 #ifdef TARGET_X86_64
2198 if (env->hflags & HF_LMA_MASK) {
2199 uint32_t e3, e4;
2200 e3 = ldl_kernel(ptr + 8);
2201 e4 = ldl_kernel(ptr + 12);
2202 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2203 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2204 load_seg_cache_raw_dt(&env->tr, e1, e2);
2205 env->tr.base |= (target_ulong)e3 << 32;
2206 } else
2207 #endif
2209 load_seg_cache_raw_dt(&env->tr, e1, e2);
2211 e2 |= DESC_TSS_BUSY_MASK;
2212 stl_kernel(ptr + 4, e2);
2214 env->tr.selector = selector;
2217 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2218 void helper_load_seg(int seg_reg, int selector)
2220 uint32_t e1, e2;
2221 int cpl, dpl, rpl;
2222 SegmentCache *dt;
2223 int index;
2224 target_ulong ptr;
2226 selector &= 0xffff;
2227 cpl = env->hflags & HF_CPL_MASK;
2228 if ((selector & 0xfffc) == 0) {
2229 /* null selector case */
2230 if (seg_reg == R_SS
2231 #ifdef TARGET_X86_64
2232 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2233 #endif
2235 raise_exception_err(EXCP0D_GPF, 0);
2236 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2237 } else {
2239 if (selector & 0x4)
2240 dt = &env->ldt;
2241 else
2242 dt = &env->gdt;
2243 index = selector & ~7;
2244 if ((index + 7) > dt->limit)
2245 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2246 ptr = dt->base + index;
2247 e1 = ldl_kernel(ptr);
2248 e2 = ldl_kernel(ptr + 4);
2250 if (!(e2 & DESC_S_MASK))
2251 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2252 rpl = selector & 3;
2253 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2254 if (seg_reg == R_SS) {
2255 /* must be writable segment */
2256 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2257 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2258 if (rpl != cpl || dpl != cpl)
2259 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2260 } else {
2261 /* must be readable segment */
2262 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2263 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2265 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2266 /* if not conforming code, test rights */
2267 if (dpl < cpl || dpl < rpl)
2268 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2272 if (!(e2 & DESC_P_MASK)) {
2273 if (seg_reg == R_SS)
2274 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2275 else
2276 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2279 /* set the access bit if not already set */
2280 if (!(e2 & DESC_A_MASK)) {
2281 e2 |= DESC_A_MASK;
2282 stl_kernel(ptr + 4, e2);
2285 cpu_x86_load_seg_cache(env, seg_reg, selector,
2286 get_seg_base(e1, e2),
2287 get_seg_limit(e1, e2),
2288 e2);
2289 #if 0
2290 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2291 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2292 #endif
2296 /* protected mode jump */
2297 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2298 int next_eip_addend)
2300 int gate_cs, type;
2301 uint32_t e1, e2, cpl, dpl, rpl, limit;
2302 target_ulong next_eip;
2304 if ((new_cs & 0xfffc) == 0)
2305 raise_exception_err(EXCP0D_GPF, 0);
2306 if (load_segment(&e1, &e2, new_cs) != 0)
2307 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2308 cpl = env->hflags & HF_CPL_MASK;
2309 if (e2 & DESC_S_MASK) {
2310 if (!(e2 & DESC_CS_MASK))
2311 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2312 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2313 if (e2 & DESC_C_MASK) {
2314 /* conforming code segment */
2315 if (dpl > cpl)
2316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2317 } else {
2318 /* non conforming code segment */
2319 rpl = new_cs & 3;
2320 if (rpl > cpl)
2321 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2322 if (dpl != cpl)
2323 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 if (!(e2 & DESC_P_MASK))
2326 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2327 limit = get_seg_limit(e1, e2);
2328 if (new_eip > limit &&
2329 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2330 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2331 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2332 get_seg_base(e1, e2), limit, e2);
2333 EIP = new_eip;
2334 } else {
2335 /* jump to call or task gate */
2336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2337 rpl = new_cs & 3;
2338 cpl = env->hflags & HF_CPL_MASK;
2339 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2340 switch(type) {
2341 case 1: /* 286 TSS */
2342 case 9: /* 386 TSS */
2343 case 5: /* task gate */
2344 if (dpl < cpl || dpl < rpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 next_eip = env->eip + next_eip_addend;
2347 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2348 CC_OP = CC_OP_EFLAGS;
2349 break;
2350 case 4: /* 286 call gate */
2351 case 12: /* 386 call gate */
2352 if ((dpl < cpl) || (dpl < rpl))
2353 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2354 if (!(e2 & DESC_P_MASK))
2355 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2356 gate_cs = e1 >> 16;
2357 new_eip = (e1 & 0xffff);
2358 if (type == 12)
2359 new_eip |= (e2 & 0xffff0000);
2360 if (load_segment(&e1, &e2, gate_cs) != 0)
2361 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2362 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2363 /* must be code segment */
2364 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2365 (DESC_S_MASK | DESC_CS_MASK)))
2366 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2367 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2368 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2369 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2370 if (!(e2 & DESC_P_MASK))
2371 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2372 limit = get_seg_limit(e1, e2);
2373 if (new_eip > limit)
2374 raise_exception_err(EXCP0D_GPF, 0);
2375 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2376 get_seg_base(e1, e2), limit, e2);
2377 EIP = new_eip;
2378 break;
2379 default:
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 break;
2386 /* real mode call */
2387 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2388 int shift, int next_eip)
2390 int new_eip;
2391 uint32_t esp, esp_mask;
2392 target_ulong ssp;
2394 new_eip = new_eip1;
2395 esp = ESP;
2396 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2397 ssp = env->segs[R_SS].base;
2398 if (shift) {
2399 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2400 PUSHL(ssp, esp, esp_mask, next_eip);
2401 } else {
2402 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2403 PUSHW(ssp, esp, esp_mask, next_eip);
2406 SET_ESP(esp, esp_mask);
2407 env->eip = new_eip;
2408 env->segs[R_CS].selector = new_cs;
2409 env->segs[R_CS].base = (new_cs << 4);
2412 /* protected mode call */
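/* Far CALL through a code segment, call gate or TSS.  For a call gate
   to a more privileged (inner) level, the new SS:ESP is fetched from
   the TSS and 'param_count' words are copied from the old stack to the
   new one before the return CS:EIP is pushed. */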
2413 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2414 int shift, int next_eip_addend)
2416 int new_stack, i;
2417 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2418 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2419 uint32_t val, limit, old_sp_mask;
2420 target_ulong ssp, old_ssp, next_eip;
2422 next_eip = env->eip + next_eip_addend;
2423 #ifdef DEBUG_PCALL
2424 if (loglevel & CPU_LOG_PCALL) {
2425 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2426 new_cs, (uint32_t)new_eip, shift);
2427 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2429 #endif
2430 if ((new_cs & 0xfffc) == 0)
2431 raise_exception_err(EXCP0D_GPF, 0);
2432 if (load_segment(&e1, &e2, new_cs) != 0)
2433 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2434 cpl = env->hflags & HF_CPL_MASK;
2435 #ifdef DEBUG_PCALL
2436 if (loglevel & CPU_LOG_PCALL) {
2437 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2439 #endif
2440 if (e2 & DESC_S_MASK) {
2441 if (!(e2 & DESC_CS_MASK))
2442 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2443 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2444 if (e2 & DESC_C_MASK) {
2445 /* conforming code segment */
2446 if (dpl > cpl)
2447 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2448 } else {
2449 /* non conforming code segment */
2450 rpl = new_cs & 3;
2451 if (rpl > cpl)
2452 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2453 if (dpl != cpl)
2454 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2456 if (!(e2 & DESC_P_MASK))
2457 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2459 #ifdef TARGET_X86_64
2460 /* XXX: check 16/32 bit cases in long mode */
2461 if (shift == 2) {
2462 target_ulong rsp;
2463 /* 64 bit case */
2464 rsp = ESP;
2465 PUSHQ(rsp, env->segs[R_CS].selector);
2466 PUSHQ(rsp, next_eip);
2467 /* from this point, not restartable */
2468 ESP = rsp;
2469 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2470 get_seg_base(e1, e2),
2471 get_seg_limit(e1, e2), e2);
2472 EIP = new_eip;
2473 } else
2474 #endif
2476 sp = ESP;
2477 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2478 ssp = env->segs[R_SS].base;
2479 if (shift) {
2480 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2481 PUSHL(ssp, sp, sp_mask, next_eip);
2482 } else {
2483 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2484 PUSHW(ssp, sp, sp_mask, next_eip);
2487 limit = get_seg_limit(e1, e2);
2488 if (new_eip > limit)
2489 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2490 /* from this point, not restartable */
2491 SET_ESP(sp, sp_mask);
2492 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2493 get_seg_base(e1, e2), limit, e2);
2494 EIP = new_eip;
2496 } else {
2497 /* check gate type */
2498 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2499 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2500 rpl = new_cs & 3;
2501 switch(type) {
2502 case 1: /* available 286 TSS */
2503 case 9: /* available 386 TSS */
2504 case 5: /* task gate */
2505 if (dpl < cpl || dpl < rpl)
2506 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2507 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2508 CC_OP = CC_OP_EFLAGS;
2509 return;
2510 case 4: /* 286 call gate */
2511 case 12: /* 386 call gate */
2512 break;
2513 default:
2514 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2515 break;
2517 shift = type >> 3;
2519 if (dpl < cpl || dpl < rpl)
2520 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2521 /* check valid bit */
2522 if (!(e2 & DESC_P_MASK))
2523 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2524 selector = e1 >> 16;
2525 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2526 param_count = e2 & 0x1f;
2527 if ((selector & 0xfffc) == 0)
2528 raise_exception_err(EXCP0D_GPF, 0);
2530 if (load_segment(&e1, &e2, selector) != 0)
2531 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2532 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2533 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2534 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2535 if (dpl > cpl)
2536 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2537 if (!(e2 & DESC_P_MASK))
2538 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2540 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2541 /* to inner privilege */
2542 get_ss_esp_from_tss(&ss, &sp, dpl);
2543 #ifdef DEBUG_PCALL
2544 if (loglevel & CPU_LOG_PCALL)
2545 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2546 ss, sp, param_count, ESP);
2547 #endif
2548 if ((ss & 0xfffc) == 0)
2549 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2550 if ((ss & 3) != dpl)
2551 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2552 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2553 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2554 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2555 if (ss_dpl != dpl)
2556 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2557 if (!(ss_e2 & DESC_S_MASK) ||
2558 (ss_e2 & DESC_CS_MASK) ||
2559 !(ss_e2 & DESC_W_MASK))
2560 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2561 if (!(ss_e2 & DESC_P_MASK))
2562 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2564 // push_size = ((param_count * 2) + 8) << shift;
2566 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2567 old_ssp = env->segs[R_SS].base;
2569 sp_mask = get_sp_mask(ss_e2);
2570 ssp = get_seg_base(ss_e1, ss_e2);
2571 if (shift) {
2572 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2573 PUSHL(ssp, sp, sp_mask, ESP);
2574 for(i = param_count - 1; i >= 0; i--) {
2575 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2576 PUSHL(ssp, sp, sp_mask, val);
2578 } else {
2579 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2580 PUSHW(ssp, sp, sp_mask, ESP);
2581 for(i = param_count - 1; i >= 0; i--) {
2582 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2583 PUSHW(ssp, sp, sp_mask, val);
2586 new_stack = 1;
2587 } else {
2588 /* to same privilege */
2589 sp = ESP;
2590 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2591 ssp = env->segs[R_SS].base;
2592 // push_size = (4 << shift);
2593 new_stack = 0;
2596 if (shift) {
2597 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2598 PUSHL(ssp, sp, sp_mask, next_eip);
2599 } else {
2600 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2601 PUSHW(ssp, sp, sp_mask, next_eip);
2604 /* from this point, not restartable */
2606 if (new_stack) {
2607 ss = (ss & ~3) | dpl;
2608 cpu_x86_load_seg_cache(env, R_SS, ss,
2609 ssp,
2610 get_seg_limit(ss_e1, ss_e2),
2611 ss_e2);
2614 selector = (selector & ~3) | dpl;
2615 cpu_x86_load_seg_cache(env, R_CS, selector,
2616 get_seg_base(e1, e2),
2617 get_seg_limit(e1, e2),
2618 e2);
2619 cpu_x86_set_cpl(env, dpl);
2620 SET_ESP(sp, sp_mask);
2621 EIP = offset;
2623 #ifdef USE_KQEMU
2624 if (kqemu_is_ok(env)) {
2625 env->exception_index = -1;
2626 cpu_loop_exit();
2628 #endif
2631 /* real and vm86 mode iret */
2632 void helper_iret_real(int shift)
2634 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2635 target_ulong ssp;
2636 int eflags_mask;
2638 sp_mask = 0xffff; /* XXX: use SS segment size? */
2639 sp = ESP;
2640 ssp = env->segs[R_SS].base;
2641 if (shift == 1) {
2642 /* 32 bits */
2643 POPL(ssp, sp, sp_mask, new_eip);
2644 POPL(ssp, sp, sp_mask, new_cs);
2645 new_cs &= 0xffff;
2646 POPL(ssp, sp, sp_mask, new_eflags);
2647 } else {
2648 /* 16 bits */
2649 POPW(ssp, sp, sp_mask, new_eip);
2650 POPW(ssp, sp, sp_mask, new_cs);
2651 POPW(ssp, sp, sp_mask, new_eflags);
2653 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
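/* note: CS is updated by writing the selector and base (selector << 4)
   directly rather than through load_seg_vm(), so the cached CS limit
   and flags are left unchanged in real/vm86 mode. */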
2654 env->segs[R_CS].selector = new_cs;
2655 env->segs[R_CS].base = (new_cs << 4);
2656 env->eip = new_eip;
2657 if (env->eflags & VM_MASK)
2658 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2659 else
2660 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2661 if (shift == 0)
2662 eflags_mask &= 0xffff;
2663 load_eflags(new_eflags, eflags_mask);
2664 env->hflags2 &= ~HF2_NMI_MASK;
2667 static inline void validate_seg(int seg_reg, int cpl)
2669 int dpl;
2670 uint32_t e2;
2672 /* XXX: on x86_64, we do not want to nullify FS and GS because
2673 they may still contain a valid base. I would be interested to
2674 know how a real x86_64 CPU behaves */
2675 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2676 (env->segs[seg_reg].selector & 0xfffc) == 0)
2677 return;
2679 e2 = env->segs[seg_reg].flags;
2680 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2681 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2682 /* data or non conforming code segment */
2683 if (dpl < cpl) {
2684 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2689 /* protected mode iret */
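/* Common far-return/IRET path.  'shift' selects the operand size
   (0 = 16 bit, 1 = 32 bit, 2 = 64 bit), 'is_iret' additionally pops
   EFLAGS, and 'addend' is the extra ESP adjustment used by far returns
   with an immediate operand. */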
2690 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2692 uint32_t new_cs, new_eflags, new_ss;
2693 uint32_t new_es, new_ds, new_fs, new_gs;
2694 uint32_t e1, e2, ss_e1, ss_e2;
2695 int cpl, dpl, rpl, eflags_mask, iopl;
2696 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2698 #ifdef TARGET_X86_64
2699 if (shift == 2)
2700 sp_mask = -1;
2701 else
2702 #endif
2703 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2704 sp = ESP;
2705 ssp = env->segs[R_SS].base;
2706 new_eflags = 0; /* avoid warning */
2707 #ifdef TARGET_X86_64
2708 if (shift == 2) {
2709 POPQ(sp, new_eip);
2710 POPQ(sp, new_cs);
2711 new_cs &= 0xffff;
2712 if (is_iret) {
2713 POPQ(sp, new_eflags);
2715 } else
2716 #endif
2717 if (shift == 1) {
2718 /* 32 bits */
2719 POPL(ssp, sp, sp_mask, new_eip);
2720 POPL(ssp, sp, sp_mask, new_cs);
2721 new_cs &= 0xffff;
2722 if (is_iret) {
2723 POPL(ssp, sp, sp_mask, new_eflags);
2724 if (new_eflags & VM_MASK)
2725 goto return_to_vm86;
2727 } else {
2728 /* 16 bits */
2729 POPW(ssp, sp, sp_mask, new_eip);
2730 POPW(ssp, sp, sp_mask, new_cs);
2731 if (is_iret)
2732 POPW(ssp, sp, sp_mask, new_eflags);
2734 #ifdef DEBUG_PCALL
2735 if (loglevel & CPU_LOG_PCALL) {
2736 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2737 new_cs, new_eip, shift, addend);
2738 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2740 #endif
2741 if ((new_cs & 0xfffc) == 0)
2742 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2743 if (load_segment(&e1, &e2, new_cs) != 0)
2744 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2745 if (!(e2 & DESC_S_MASK) ||
2746 !(e2 & DESC_CS_MASK))
2747 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2748 cpl = env->hflags & HF_CPL_MASK;
2749 rpl = new_cs & 3;
2750 if (rpl < cpl)
2751 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2752 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2753 if (e2 & DESC_C_MASK) {
2754 if (dpl > rpl)
2755 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2756 } else {
2757 if (dpl != rpl)
2758 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2760 if (!(e2 & DESC_P_MASK))
2761 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2763 sp += addend;
2764 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2765 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2766 /* return to same privilege level */
2767 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2768 get_seg_base(e1, e2),
2769 get_seg_limit(e1, e2),
2770 e2);
2771 } else {
2772 /* return to different privilege level */
2773 #ifdef TARGET_X86_64
2774 if (shift == 2) {
2775 POPQ(sp, new_esp);
2776 POPQ(sp, new_ss);
2777 new_ss &= 0xffff;
2778 } else
2779 #endif
2780 if (shift == 1) {
2781 /* 32 bits */
2782 POPL(ssp, sp, sp_mask, new_esp);
2783 POPL(ssp, sp, sp_mask, new_ss);
2784 new_ss &= 0xffff;
2785 } else {
2786 /* 16 bits */
2787 POPW(ssp, sp, sp_mask, new_esp);
2788 POPW(ssp, sp, sp_mask, new_ss);
2790 #ifdef DEBUG_PCALL
2791 if (loglevel & CPU_LOG_PCALL) {
2792 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2793 new_ss, new_esp);
2795 #endif
2796 if ((new_ss & 0xfffc) == 0) {
2797 #ifdef TARGET_X86_64
2798 /* NULL ss is allowed in long mode if cpl != 3 */
2799 /* XXX: test CS64 ? */
2800 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2801 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2802 0, 0xffffffff,
2803 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2804 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2805 DESC_W_MASK | DESC_A_MASK);
2806 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2807 } else
2808 #endif
2810 raise_exception_err(EXCP0D_GPF, 0);
2812 } else {
2813 if ((new_ss & 3) != rpl)
2814 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2815 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2816 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2817 if (!(ss_e2 & DESC_S_MASK) ||
2818 (ss_e2 & DESC_CS_MASK) ||
2819 !(ss_e2 & DESC_W_MASK))
2820 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2821 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2822 if (dpl != rpl)
2823 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2824 if (!(ss_e2 & DESC_P_MASK))
2825 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2826 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2827 get_seg_base(ss_e1, ss_e2),
2828 get_seg_limit(ss_e1, ss_e2),
2829 ss_e2);
2832 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2833 get_seg_base(e1, e2),
2834 get_seg_limit(e1, e2),
2835 e2);
2836 cpu_x86_set_cpl(env, rpl);
2837 sp = new_esp;
2838 #ifdef TARGET_X86_64
2839 if (env->hflags & HF_CS64_MASK)
2840 sp_mask = -1;
2841 else
2842 #endif
2843 sp_mask = get_sp_mask(ss_e2);
2845 /* validate data segments */
2846 validate_seg(R_ES, rpl);
2847 validate_seg(R_DS, rpl);
2848 validate_seg(R_FS, rpl);
2849 validate_seg(R_GS, rpl);
2851 sp += addend;
2853 SET_ESP(sp, sp_mask);
2854 env->eip = new_eip;
2855 if (is_iret) {
2856 /* NOTE: 'cpl' is the _old_ CPL */
2857 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2858 if (cpl == 0)
2859 eflags_mask |= IOPL_MASK;
2860 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2861 if (cpl <= iopl)
2862 eflags_mask |= IF_MASK;
2863 if (shift == 0)
2864 eflags_mask &= 0xffff;
2865 load_eflags(new_eflags, eflags_mask);
2867 return;
2869 return_to_vm86:
2870 POPL(ssp, sp, sp_mask, new_esp);
2871 POPL(ssp, sp, sp_mask, new_ss);
2872 POPL(ssp, sp, sp_mask, new_es);
2873 POPL(ssp, sp, sp_mask, new_ds);
2874 POPL(ssp, sp, sp_mask, new_fs);
2875 POPL(ssp, sp, sp_mask, new_gs);
2877 /* modify processor state */
2878 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2879 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2880 load_seg_vm(R_CS, new_cs & 0xffff);
2881 cpu_x86_set_cpl(env, 3);
2882 load_seg_vm(R_SS, new_ss & 0xffff);
2883 load_seg_vm(R_ES, new_es & 0xffff);
2884 load_seg_vm(R_DS, new_ds & 0xffff);
2885 load_seg_vm(R_FS, new_fs & 0xffff);
2886 load_seg_vm(R_GS, new_gs & 0xffff);
2888 env->eip = new_eip & 0xffff;
2889 ESP = new_esp;
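/* IRET in protected mode.  If EFLAGS.NT is set this is a return from a
   nested task: the back-link selector at offset 0 of the current TSS is
   checked and switch_tss() performs the task switch (long mode raises
   #GP instead); otherwise the normal helper_ret_protected() path is
   used. */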
2892 void helper_iret_protected(int shift, int next_eip)
2894 int tss_selector, type;
2895 uint32_t e1, e2;
2897 /* specific case for TSS */
2898 if (env->eflags & NT_MASK) {
2899 #ifdef TARGET_X86_64
2900 if (env->hflags & HF_LMA_MASK)
2901 raise_exception_err(EXCP0D_GPF, 0);
2902 #endif
2903 tss_selector = lduw_kernel(env->tr.base + 0);
2904 if (tss_selector & 4)
2905 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2906 if (load_segment(&e1, &e2, tss_selector) != 0)
2907 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2908 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2909 /* NOTE: we check both segment and busy TSS */
2910 if (type != 3)
2911 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2912 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2913 } else {
2914 helper_ret_protected(shift, 1, 0);
2916 env->hflags2 &= ~HF2_NMI_MASK;
2917 #ifdef USE_KQEMU
2918 if (kqemu_is_ok(env)) {
2919 CC_OP = CC_OP_EFLAGS;
2920 env->exception_index = -1;
2921 cpu_loop_exit();
2923 #endif
2926 void helper_lret_protected(int shift, int addend)
2928 helper_ret_protected(shift, 0, addend);
2929 #ifdef USE_KQEMU
2930 if (kqemu_is_ok(env)) {
2931 env->exception_index = -1;
2932 cpu_loop_exit();
2934 #endif
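/* SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS MSR:
   CS = sysenter_cs and SS = sysenter_cs + 8 on entry, and the
   corresponding ring-3 selectors (+16/+24, or +32/+40 in 64-bit mode)
   on exit.  A zero SYSENTER_CS raises #GP. */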
2937 void helper_sysenter(void)
2939 if (env->sysenter_cs == 0) {
2940 raise_exception_err(EXCP0D_GPF, 0);
2942 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2943 cpu_x86_set_cpl(env, 0);
2945 #ifdef TARGET_X86_64
2946 if (env->hflags & HF_LMA_MASK) {
2947 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2948 0, 0xffffffff,
2949 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2950 DESC_S_MASK |
2951 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2952 } else
2953 #endif
2955 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2956 0, 0xffffffff,
2957 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2958 DESC_S_MASK |
2959 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2961 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2962 0, 0xffffffff,
2963 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2964 DESC_S_MASK |
2965 DESC_W_MASK | DESC_A_MASK);
2966 ESP = env->sysenter_esp;
2967 EIP = env->sysenter_eip;
2970 void helper_sysexit(int dflag)
2972 int cpl;
2974 cpl = env->hflags & HF_CPL_MASK;
2975 if (env->sysenter_cs == 0 || cpl != 0) {
2976 raise_exception_err(EXCP0D_GPF, 0);
2978 cpu_x86_set_cpl(env, 3);
2979 #ifdef TARGET_X86_64
2980 if (dflag == 2) {
2981 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2982 0, 0xffffffff,
2983 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2984 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2985 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2986 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2987 0, 0xffffffff,
2988 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2989 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2990 DESC_W_MASK | DESC_A_MASK);
2991 } else
2992 #endif
2994 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2995 0, 0xffffffff,
2996 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2997 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2998 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2999 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3000 0, 0xffffffff,
3001 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3002 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3003 DESC_W_MASK | DESC_A_MASK);
3005 ESP = ECX;
3006 EIP = EDX;
3007 #ifdef USE_KQEMU
3008 if (kqemu_is_ok(env)) {
3009 env->exception_index = -1;
3010 cpu_loop_exit();
3012 #endif
3015 #if defined(CONFIG_USER_ONLY)
3016 target_ulong helper_read_crN(int reg)
3018 return 0;
3021 void helper_write_crN(int reg, target_ulong t0)
3024 #else
3025 target_ulong helper_read_crN(int reg)
3027 target_ulong val;
3029 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3030 switch(reg) {
3031 default:
3032 val = env->cr[reg];
3033 break;
3034 case 8:
3035 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3036 val = cpu_get_apic_tpr(env);
3037 } else {
3038 val = env->v_tpr;
3040 break;
3042 return val;
3045 void helper_write_crN(int reg, target_ulong t0)
3047 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3048 switch(reg) {
3049 case 0:
3050 cpu_x86_update_cr0(env, t0);
3051 break;
3052 case 3:
3053 cpu_x86_update_cr3(env, t0);
3054 break;
3055 case 4:
3056 cpu_x86_update_cr4(env, t0);
3057 break;
3058 case 8:
3059 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3060 cpu_set_apic_tpr(env, t0);
3062 env->v_tpr = t0 & 0x0f;
3063 break;
3064 default:
3065 env->cr[reg] = t0;
3066 break;
3069 #endif
3071 void helper_lmsw(target_ulong t0)
3073 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3074 if already set to one. */
3075 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3076 helper_write_crN(0, t0);
3079 void helper_clts(void)
3081 env->cr[0] &= ~CR0_TS_MASK;
3082 env->hflags &= ~HF_TS_MASK;
3085 /* XXX: do more */
3086 void helper_movl_drN_T0(int reg, target_ulong t0)
3088 env->dr[reg] = t0;
3091 void helper_invlpg(target_ulong addr)
3093 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3094 tlb_flush_page(env, addr);
3097 void helper_rdtsc(void)
3099 uint64_t val;
3101 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3102 raise_exception(EXCP0D_GPF);
3104 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3106 val = cpu_get_tsc(env) + env->tsc_offset;
3107 EAX = (uint32_t)(val);
3108 EDX = (uint32_t)(val >> 32);
3111 void helper_rdpmc(void)
3113 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3114 raise_exception(EXCP0D_GPF);
3116 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3118 /* currently unimplemented */
3119 raise_exception_err(EXCP06_ILLOP, 0);
3122 #if defined(CONFIG_USER_ONLY)
3123 void helper_wrmsr(void)
3127 void helper_rdmsr(void)
3130 #else
3131 void helper_wrmsr(void)
3133 uint64_t val;
3135 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3137 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3139 switch((uint32_t)ECX) {
3140 case MSR_IA32_SYSENTER_CS:
3141 env->sysenter_cs = val & 0xffff;
3142 break;
3143 case MSR_IA32_SYSENTER_ESP:
3144 env->sysenter_esp = val;
3145 break;
3146 case MSR_IA32_SYSENTER_EIP:
3147 env->sysenter_eip = val;
3148 break;
3149 case MSR_IA32_APICBASE:
3150 cpu_set_apic_base(env, val);
3151 break;
3152 case MSR_EFER:
3154 uint64_t update_mask;
3155 update_mask = 0;
3156 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3157 update_mask |= MSR_EFER_SCE;
3158 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3159 update_mask |= MSR_EFER_LME;
3160 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3161 update_mask |= MSR_EFER_FFXSR;
3162 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3163 update_mask |= MSR_EFER_NXE;
3164 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3165 update_mask |= MSR_EFER_SVME;
3166 cpu_load_efer(env, (env->efer & ~update_mask) |
3167 (val & update_mask));
3169 break;
3170 case MSR_STAR:
3171 env->star = val;
3172 break;
3173 case MSR_PAT:
3174 env->pat = val;
3175 break;
3176 case MSR_VM_HSAVE_PA:
3177 env->vm_hsave = val;
3178 break;
3179 #ifdef TARGET_X86_64
3180 case MSR_LSTAR:
3181 env->lstar = val;
3182 break;
3183 case MSR_CSTAR:
3184 env->cstar = val;
3185 break;
3186 case MSR_FMASK:
3187 env->fmask = val;
3188 break;
3189 case MSR_FSBASE:
3190 env->segs[R_FS].base = val;
3191 break;
3192 case MSR_GSBASE:
3193 env->segs[R_GS].base = val;
3194 break;
3195 case MSR_KERNELGSBASE:
3196 env->kernelgsbase = val;
3197 break;
3198 #endif
3199 default:
3200 /* XXX: exception ? */
3201 break;
3205 void helper_rdmsr(void)
3207 uint64_t val;
3209 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3211 switch((uint32_t)ECX) {
3212 case MSR_IA32_SYSENTER_CS:
3213 val = env->sysenter_cs;
3214 break;
3215 case MSR_IA32_SYSENTER_ESP:
3216 val = env->sysenter_esp;
3217 break;
3218 case MSR_IA32_SYSENTER_EIP:
3219 val = env->sysenter_eip;
3220 break;
3221 case MSR_IA32_APICBASE:
3222 val = cpu_get_apic_base(env);
3223 break;
3224 case MSR_EFER:
3225 val = env->efer;
3226 break;
3227 case MSR_STAR:
3228 val = env->star;
3229 break;
3230 case MSR_PAT:
3231 val = env->pat;
3232 break;
3233 case MSR_VM_HSAVE_PA:
3234 val = env->vm_hsave;
3235 break;
3236 case MSR_IA32_PERF_STATUS:
3237 /* tsc_increment_by_tick */
3238 val = 1000ULL;
3239 /* CPU multiplier */
3240 val |= (((uint64_t)4ULL) << 40);
3241 break;
3242 #ifdef TARGET_X86_64
3243 case MSR_LSTAR:
3244 val = env->lstar;
3245 break;
3246 case MSR_CSTAR:
3247 val = env->cstar;
3248 break;
3249 case MSR_FMASK:
3250 val = env->fmask;
3251 break;
3252 case MSR_FSBASE:
3253 val = env->segs[R_FS].base;
3254 break;
3255 case MSR_GSBASE:
3256 val = env->segs[R_GS].base;
3257 break;
3258 case MSR_KERNELGSBASE:
3259 val = env->kernelgsbase;
3260 break;
3261 #endif
3262 #ifdef USE_KQEMU
3263 case MSR_QPI_COMMBASE:
3264 if (env->kqemu_enabled) {
3265 val = kqemu_comm_base;
3266 } else {
3267 val = 0;
3269 break;
3270 #endif
3271 default:
3272 /* XXX: exception ? */
3273 val = 0;
3274 break;
3276 EAX = (uint32_t)(val);
3277 EDX = (uint32_t)(val >> 32);
3279 #endif
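/* LSL/LAR/VERR/VERW report their result through ZF: on success CC_SRC
   is written with CC_Z set (and LSL/LAR return the limit or access
   rights), on failure CC_Z is cleared and LSL/LAR return 0. */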
3281 target_ulong helper_lsl(target_ulong selector1)
3283 unsigned int limit;
3284 uint32_t e1, e2, eflags, selector;
3285 int rpl, dpl, cpl, type;
3287 selector = selector1 & 0xffff;
3288 eflags = cc_table[CC_OP].compute_all();
3289 if (load_segment(&e1, &e2, selector) != 0)
3290 goto fail;
3291 rpl = selector & 3;
3292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3293 cpl = env->hflags & HF_CPL_MASK;
3294 if (e2 & DESC_S_MASK) {
3295 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3296 /* conforming */
3297 } else {
3298 if (dpl < cpl || dpl < rpl)
3299 goto fail;
3301 } else {
3302 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3303 switch(type) {
3304 case 1:
3305 case 2:
3306 case 3:
3307 case 9:
3308 case 11:
3309 break;
3310 default:
3311 goto fail;
3313 if (dpl < cpl || dpl < rpl) {
3314 fail:
3315 CC_SRC = eflags & ~CC_Z;
3316 return 0;
3319 limit = get_seg_limit(e1, e2);
3320 CC_SRC = eflags | CC_Z;
3321 return limit;
3324 target_ulong helper_lar(target_ulong selector1)
3326 uint32_t e1, e2, eflags, selector;
3327 int rpl, dpl, cpl, type;
3329 selector = selector1 & 0xffff;
3330 eflags = cc_table[CC_OP].compute_all();
3331 if ((selector & 0xfffc) == 0)
3332 goto fail;
3333 if (load_segment(&e1, &e2, selector) != 0)
3334 goto fail;
3335 rpl = selector & 3;
3336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3337 cpl = env->hflags & HF_CPL_MASK;
3338 if (e2 & DESC_S_MASK) {
3339 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3340 /* conforming */
3341 } else {
3342 if (dpl < cpl || dpl < rpl)
3343 goto fail;
3345 } else {
3346 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3347 switch(type) {
3348 case 1:
3349 case 2:
3350 case 3:
3351 case 4:
3352 case 5:
3353 case 9:
3354 case 11:
3355 case 12:
3356 break;
3357 default:
3358 goto fail;
3360 if (dpl < cpl || dpl < rpl) {
3361 fail:
3362 CC_SRC = eflags & ~CC_Z;
3363 return 0;
3366 CC_SRC = eflags | CC_Z;
3367 return e2 & 0x00f0ff00;
3370 void helper_verr(target_ulong selector1)
3372 uint32_t e1, e2, eflags, selector;
3373 int rpl, dpl, cpl;
3375 selector = selector1 & 0xffff;
3376 eflags = cc_table[CC_OP].compute_all();
3377 if ((selector & 0xfffc) == 0)
3378 goto fail;
3379 if (load_segment(&e1, &e2, selector) != 0)
3380 goto fail;
3381 if (!(e2 & DESC_S_MASK))
3382 goto fail;
3383 rpl = selector & 3;
3384 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3385 cpl = env->hflags & HF_CPL_MASK;
3386 if (e2 & DESC_CS_MASK) {
3387 if (!(e2 & DESC_R_MASK))
3388 goto fail;
3389 if (!(e2 & DESC_C_MASK)) {
3390 if (dpl < cpl || dpl < rpl)
3391 goto fail;
3393 } else {
3394 if (dpl < cpl || dpl < rpl) {
3395 fail:
3396 CC_SRC = eflags & ~CC_Z;
3397 return;
3400 CC_SRC = eflags | CC_Z;
3403 void helper_verw(target_ulong selector1)
3405 uint32_t e1, e2, eflags, selector;
3406 int rpl, dpl, cpl;
3408 selector = selector1 & 0xffff;
3409 eflags = cc_table[CC_OP].compute_all();
3410 if ((selector & 0xfffc) == 0)
3411 goto fail;
3412 if (load_segment(&e1, &e2, selector) != 0)
3413 goto fail;
3414 if (!(e2 & DESC_S_MASK))
3415 goto fail;
3416 rpl = selector & 3;
3417 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3418 cpl = env->hflags & HF_CPL_MASK;
3419 if (e2 & DESC_CS_MASK) {
3420 goto fail;
3421 } else {
3422 if (dpl < cpl || dpl < rpl)
3423 goto fail;
3424 if (!(e2 & DESC_W_MASK)) {
3425 fail:
3426 CC_SRC = eflags & ~CC_Z;
3427 return;
3430 CC_SRC = eflags | CC_Z;
3433 /* x87 FPU helpers */
3435 static void fpu_set_exception(int mask)
3437 env->fpus |= mask;
3438 if (env->fpus & (~env->fpuc & FPUC_EM))
3439 env->fpus |= FPUS_SE | FPUS_B;
3442 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3444 if (b == 0.0)
3445 fpu_set_exception(FPUS_ZE);
3446 return a / b;
3449 void fpu_raise_exception(void)
3451 if (env->cr[0] & CR0_NE_MASK) {
3452 raise_exception(EXCP10_COPR);
3454 #if !defined(CONFIG_USER_ONLY)
3455 else {
3456 cpu_set_ferr(env);
3458 #endif
3461 void helper_flds_FT0(uint32_t val)
3463 union {
3464 float32 f;
3465 uint32_t i;
3466 } u;
3467 u.i = val;
3468 FT0 = float32_to_floatx(u.f, &env->fp_status);
3471 void helper_fldl_FT0(uint64_t val)
3473 union {
3474 float64 f;
3475 uint64_t i;
3476 } u;
3477 u.i = val;
3478 FT0 = float64_to_floatx(u.f, &env->fp_status);
3481 void helper_fildl_FT0(int32_t val)
3483 FT0 = int32_to_floatx(val, &env->fp_status);
3486 void helper_flds_ST0(uint32_t val)
3488 int new_fpstt;
3489 union {
3490 float32 f;
3491 uint32_t i;
3492 } u;
3493 new_fpstt = (env->fpstt - 1) & 7;
3494 u.i = val;
3495 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3496 env->fpstt = new_fpstt;
3497 env->fptags[new_fpstt] = 0; /* validate stack entry */
3500 void helper_fldl_ST0(uint64_t val)
3502 int new_fpstt;
3503 union {
3504 float64 f;
3505 uint64_t i;
3506 } u;
3507 new_fpstt = (env->fpstt - 1) & 7;
3508 u.i = val;
3509 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3510 env->fpstt = new_fpstt;
3511 env->fptags[new_fpstt] = 0; /* validate stack entry */
3514 void helper_fildl_ST0(int32_t val)
3516 int new_fpstt;
3517 new_fpstt = (env->fpstt - 1) & 7;
3518 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3519 env->fpstt = new_fpstt;
3520 env->fptags[new_fpstt] = 0; /* validate stack entry */
3523 void helper_fildll_ST0(int64_t val)
3525 int new_fpstt;
3526 new_fpstt = (env->fpstt - 1) & 7;
3527 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3528 env->fpstt = new_fpstt;
3529 env->fptags[new_fpstt] = 0; /* validate stack entry */
3532 uint32_t helper_fsts_ST0(void)
3534 union {
3535 float32 f;
3536 uint32_t i;
3537 } u;
3538 u.f = floatx_to_float32(ST0, &env->fp_status);
3539 return u.i;
3542 uint64_t helper_fstl_ST0(void)
3544 union {
3545 float64 f;
3546 uint64_t i;
3547 } u;
3548 u.f = floatx_to_float64(ST0, &env->fp_status);
3549 return u.i;
3552 int32_t helper_fist_ST0(void)
3554 int32_t val;
3555 val = floatx_to_int32(ST0, &env->fp_status);
3556 if (val != (int16_t)val)
3557 val = -32768;
3558 return val;
3561 int32_t helper_fistl_ST0(void)
3563 int32_t val;
3564 val = floatx_to_int32(ST0, &env->fp_status);
3565 return val;
3568 int64_t helper_fistll_ST0(void)
3570 int64_t val;
3571 val = floatx_to_int64(ST0, &env->fp_status);
3572 return val;
3575 int32_t helper_fistt_ST0(void)
3577 int32_t val;
3578 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3579 if (val != (int16_t)val)
3580 val = -32768;
3581 return val;
3584 int32_t helper_fisttl_ST0(void)
3586 int32_t val;
3587 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3588 return val;
3591 int64_t helper_fisttll_ST0(void)
3593 int64_t val;
3594 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3595 return val;
3598 void helper_fldt_ST0(target_ulong ptr)
3600 int new_fpstt;
3601 new_fpstt = (env->fpstt - 1) & 7;
3602 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3603 env->fpstt = new_fpstt;
3604 env->fptags[new_fpstt] = 0; /* validate stack entry */
3607 void helper_fstt_ST0(target_ulong ptr)
3609 helper_fstt(ST0, ptr);
3612 void helper_fpush(void)
3614 fpush();
3617 void helper_fpop(void)
3619 fpop();
3622 void helper_fdecstp(void)
3624 env->fpstt = (env->fpstt - 1) & 7;
3625 env->fpus &= (~0x4700);
3628 void helper_fincstp(void)
3630 env->fpstt = (env->fpstt + 1) & 7;
3631 env->fpus &= (~0x4700);
3634 /* FPU move */
3636 void helper_ffree_STN(int st_index)
3638 env->fptags[(env->fpstt + st_index) & 7] = 1;
3641 void helper_fmov_ST0_FT0(void)
3643 ST0 = FT0;
3646 void helper_fmov_FT0_STN(int st_index)
3648 FT0 = ST(st_index);
3651 void helper_fmov_ST0_STN(int st_index)
3653 ST0 = ST(st_index);
3656 void helper_fmov_STN_ST0(int st_index)
3658 ST(st_index) = ST0;
3661 void helper_fxchg_ST0_STN(int st_index)
3663 CPU86_LDouble tmp;
3664 tmp = ST(st_index);
3665 ST(st_index) = ST0;
3666 ST0 = tmp;
3669 /* FPU operations */
3671 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3673 void helper_fcom_ST0_FT0(void)
3675 int ret;
3677 ret = floatx_compare(ST0, FT0, &env->fp_status);
3678 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3679 FORCE_RET();
3682 void helper_fucom_ST0_FT0(void)
3684 int ret;
3686 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3687 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3688 FORCE_RET();
3691 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3693 void helper_fcomi_ST0_FT0(void)
3695 int eflags;
3696 int ret;
3698 ret = floatx_compare(ST0, FT0, &env->fp_status);
3699 eflags = cc_table[CC_OP].compute_all();
3700 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3701 CC_SRC = eflags;
3702 FORCE_RET();
3705 void helper_fucomi_ST0_FT0(void)
3707 int eflags;
3708 int ret;
3710 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3711 eflags = cc_table[CC_OP].compute_all();
3712 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3713 CC_SRC = eflags;
3714 FORCE_RET();
3717 void helper_fadd_ST0_FT0(void)
3719 ST0 += FT0;
3722 void helper_fmul_ST0_FT0(void)
3724 ST0 *= FT0;
3727 void helper_fsub_ST0_FT0(void)
3729 ST0 -= FT0;
3732 void helper_fsubr_ST0_FT0(void)
3734 ST0 = FT0 - ST0;
3737 void helper_fdiv_ST0_FT0(void)
3739 ST0 = helper_fdiv(ST0, FT0);
3742 void helper_fdivr_ST0_FT0(void)
3744 ST0 = helper_fdiv(FT0, ST0);
3747 /* fp operations between STN and ST0 */
3749 void helper_fadd_STN_ST0(int st_index)
3751 ST(st_index) += ST0;
3754 void helper_fmul_STN_ST0(int st_index)
3756 ST(st_index) *= ST0;
3759 void helper_fsub_STN_ST0(int st_index)
3761 ST(st_index) -= ST0;
3764 void helper_fsubr_STN_ST0(int st_index)
3766 CPU86_LDouble *p;
3767 p = &ST(st_index);
3768 *p = ST0 - *p;
3771 void helper_fdiv_STN_ST0(int st_index)
3773 CPU86_LDouble *p;
3774 p = &ST(st_index);
3775 *p = helper_fdiv(*p, ST0);
3778 void helper_fdivr_STN_ST0(int st_index)
3780 CPU86_LDouble *p;
3781 p = &ST(st_index);
3782 *p = helper_fdiv(ST0, *p);
3785 /* misc FPU operations */
3786 void helper_fchs_ST0(void)
3788 ST0 = floatx_chs(ST0);
3791 void helper_fabs_ST0(void)
3793 ST0 = floatx_abs(ST0);
3796 void helper_fld1_ST0(void)
3798 ST0 = f15rk[1];
3801 void helper_fldl2t_ST0(void)
3803 ST0 = f15rk[6];
3806 void helper_fldl2e_ST0(void)
3808 ST0 = f15rk[5];
3811 void helper_fldpi_ST0(void)
3813 ST0 = f15rk[2];
3816 void helper_fldlg2_ST0(void)
3818 ST0 = f15rk[3];
3821 void helper_fldln2_ST0(void)
3823 ST0 = f15rk[4];
3826 void helper_fldz_ST0(void)
3828 ST0 = f15rk[0];
3831 void helper_fldz_FT0(void)
3833 FT0 = f15rk[0];
3836 uint32_t helper_fnstsw(void)
3838 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3841 uint32_t helper_fnstcw(void)
3843 return env->fpuc;
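/* Propagate the FPU control word to softfloat: the RC field selects the
   rounding mode and, when FLOATX80 is available, bits 8-9 select the
   rounding precision (32/64/80 bits). */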
3846 static void update_fp_status(void)
3848 int rnd_type;
3850 /* set rounding mode */
3851 switch(env->fpuc & RC_MASK) {
3852 default:
3853 case RC_NEAR:
3854 rnd_type = float_round_nearest_even;
3855 break;
3856 case RC_DOWN:
3857 rnd_type = float_round_down;
3858 break;
3859 case RC_UP:
3860 rnd_type = float_round_up;
3861 break;
3862 case RC_CHOP:
3863 rnd_type = float_round_to_zero;
3864 break;
3866 set_float_rounding_mode(rnd_type, &env->fp_status);
3867 #ifdef FLOATX80
3868 switch((env->fpuc >> 8) & 3) {
3869 case 0:
3870 rnd_type = 32;
3871 break;
3872 case 2:
3873 rnd_type = 64;
3874 break;
3875 case 3:
3876 default:
3877 rnd_type = 80;
3878 break;
3880 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3881 #endif
3884 void helper_fldcw(uint32_t val)
3886 env->fpuc = val;
3887 update_fp_status();
3890 void helper_fclex(void)
3892 env->fpus &= 0x7f00;
3895 void helper_fwait(void)
3897 if (env->fpus & FPUS_SE)
3898 fpu_raise_exception();
3899 FORCE_RET();
3902 void helper_fninit(void)
3904 env->fpus = 0;
3905 env->fpstt = 0;
3906 env->fpuc = 0x37f;
3907 env->fptags[0] = 1;
3908 env->fptags[1] = 1;
3909 env->fptags[2] = 1;
3910 env->fptags[3] = 1;
3911 env->fptags[4] = 1;
3912 env->fptags[5] = 1;
3913 env->fptags[6] = 1;
3914 env->fptags[7] = 1;
3917 /* BCD ops */
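/* FBLD/FBST operate on 10-byte packed BCD: bytes 0-8 hold 18 decimal
   digits (two per byte, low digit in the low nibble) and bit 7 of
   byte 9 is the sign. */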
3919 void helper_fbld_ST0(target_ulong ptr)
3921 CPU86_LDouble tmp;
3922 uint64_t val;
3923 unsigned int v;
3924 int i;
3926 val = 0;
3927 for(i = 8; i >= 0; i--) {
3928 v = ldub(ptr + i);
3929 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3931 tmp = val;
3932 if (ldub(ptr + 9) & 0x80)
3933 tmp = -tmp;
3934 fpush();
3935 ST0 = tmp;
3938 void helper_fbst_ST0(target_ulong ptr)
3940 int v;
3941 target_ulong mem_ref, mem_end;
3942 int64_t val;
3944 val = floatx_to_int64(ST0, &env->fp_status);
3945 mem_ref = ptr;
3946 mem_end = mem_ref + 9;
3947 if (val < 0) {
3948 stb(mem_end, 0x80);
3949 val = -val;
3950 } else {
3951 stb(mem_end, 0x00);
3953 while (mem_ref < mem_end) {
3954 if (val == 0)
3955 break;
3956 v = val % 100;
3957 val = val / 100;
3958 v = ((v / 10) << 4) | (v % 10);
3959 stb(mem_ref++, v);
3961 while (mem_ref < mem_end) {
3962 stb(mem_ref++, 0);
3966 void helper_f2xm1(void)
3968 ST0 = pow(2.0,ST0) - 1.0;
3971 void helper_fyl2x(void)
3973 CPU86_LDouble fptemp;
3975 fptemp = ST0;
3976 if (fptemp>0.0){
3977 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3978 ST1 *= fptemp;
3979 fpop();
3980 } else {
3981 env->fpus &= (~0x4700);
3982 env->fpus |= 0x400;
3986 void helper_fptan(void)
3988 CPU86_LDouble fptemp;
3990 fptemp = ST0;
3991 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3992 env->fpus |= 0x400;
3993 } else {
3994 ST0 = tan(fptemp);
3995 fpush();
3996 ST0 = 1.0;
3997 env->fpus &= (~0x400); /* C2 <-- 0 */
3998 /* the above code is for |arg| < 2**52 only */
4002 void helper_fpatan(void)
4004 CPU86_LDouble fptemp, fpsrcop;
4006 fpsrcop = ST1;
4007 fptemp = ST0;
4008 ST1 = atan2(fpsrcop,fptemp);
4009 fpop();
4012 void helper_fxtract(void)
4014 CPU86_LDoubleU temp;
4015 unsigned int expdif;
4017 temp.d = ST0;
4018 expdif = EXPD(temp) - EXPBIAS;
4019 /* DP exponent bias */
4020 ST0 = expdif;
4021 fpush();
4022 BIASEXPONENT(temp);
4023 ST0 = temp.d;
4026 void helper_fprem1(void)
4028 CPU86_LDouble dblq, fpsrcop, fptemp;
4029 CPU86_LDoubleU fpsrcop1, fptemp1;
4030 int expdif;
4031 signed long long int q;
4033 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4034 ST0 = 0.0 / 0.0; /* NaN */
4035 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4036 return;
4039 fpsrcop = ST0;
4040 fptemp = ST1;
4041 fpsrcop1.d = fpsrcop;
4042 fptemp1.d = fptemp;
4043 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4045 if (expdif < 0) {
4046 /* optimisation? taken from the AMD docs */
4047 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4048 /* ST0 is unchanged */
4049 return;
4052 if (expdif < 53) {
4053 dblq = fpsrcop / fptemp;
4054 /* round dblq towards nearest integer */
4055 dblq = rint(dblq);
4056 ST0 = fpsrcop - fptemp * dblq;
4058 /* convert dblq to q by truncating towards zero */
4059 if (dblq < 0.0)
4060 q = (signed long long int)(-dblq);
4061 else
4062 q = (signed long long int)dblq;
4064 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4065 /* (C0,C3,C1) <-- (q2,q1,q0) */
4066 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4067 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4068 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4069 } else {
4070 env->fpus |= 0x400; /* C2 <-- 1 */
4071 fptemp = pow(2.0, expdif - 50);
4072 fpsrcop = (ST0 / ST1) / fptemp;
4073 /* fpsrcop = integer obtained by chopping */
4074 fpsrcop = (fpsrcop < 0.0) ?
4075 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4076 ST0 -= (ST1 * fpsrcop * fptemp);
4080 void helper_fprem(void)
4082 CPU86_LDouble dblq, fpsrcop, fptemp;
4083 CPU86_LDoubleU fpsrcop1, fptemp1;
4084 int expdif;
4085 signed long long int q;
4087 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4088 ST0 = 0.0 / 0.0; /* NaN */
4089 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4090 return;
4093 fpsrcop = (CPU86_LDouble)ST0;
4094 fptemp = (CPU86_LDouble)ST1;
4095 fpsrcop1.d = fpsrcop;
4096 fptemp1.d = fptemp;
4097 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4099 if (expdif < 0) {
4100 /* optimisation? taken from the AMD docs */
4101 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4102 /* ST0 is unchanged */
4103 return;
4106 if ( expdif < 53 ) {
4107 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4108 /* round dblq towards zero */
4109 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4110 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4112 /* convert dblq to q by truncating towards zero */
4113 if (dblq < 0.0)
4114 q = (signed long long int)(-dblq);
4115 else
4116 q = (signed long long int)dblq;
4118 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4119 /* (C0,C3,C1) <-- (q2,q1,q0) */
4120 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4121 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4122 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4123 } else {
4124 int N = 32 + (expdif % 32); /* as per AMD docs */
4125 env->fpus |= 0x400; /* C2 <-- 1 */
4126 fptemp = pow(2.0, (double)(expdif - N));
4127 fpsrcop = (ST0 / ST1) / fptemp;
4128 /* fpsrcop = integer obtained by chopping */
4129 fpsrcop = (fpsrcop < 0.0) ?
4130 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4131 ST0 -= (ST1 * fpsrcop * fptemp);
4135 void helper_fyl2xp1(void)
4137 CPU86_LDouble fptemp;
4139 fptemp = ST0;
4140 if ((fptemp+1.0)>0.0) {
4141 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4142 ST1 *= fptemp;
4143 fpop();
4144 } else {
4145 env->fpus &= (~0x4700);
4146 env->fpus |= 0x400;
4150 void helper_fsqrt(void)
4152 CPU86_LDouble fptemp;
4154 fptemp = ST0;
4155 if (fptemp<0.0) {
4156 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4157 env->fpus |= 0x400;
4159 ST0 = sqrt(fptemp);
4162 void helper_fsincos(void)
4164 CPU86_LDouble fptemp;
4166 fptemp = ST0;
4167 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4168 env->fpus |= 0x400;
4169 } else {
4170 ST0 = sin(fptemp);
4171 fpush();
4172 ST0 = cos(fptemp);
4173 env->fpus &= (~0x400); /* C2 <-- 0 */
4174 /* the above code is for |arg| < 2**63 only */
4178 void helper_frndint(void)
4180 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4183 void helper_fscale(void)
4185 ST0 = ldexp (ST0, (int)(ST1));
4188 void helper_fsin(void)
4190 CPU86_LDouble fptemp;
4192 fptemp = ST0;
4193 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4194 env->fpus |= 0x400;
4195 } else {
4196 ST0 = sin(fptemp);
4197 env->fpus &= (~0x400); /* C2 <-- 0 */
4198 /* the above code is for |arg| < 2**53 only */
4202 void helper_fcos(void)
4204 CPU86_LDouble fptemp;
4206 fptemp = ST0;
4207 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4208 env->fpus |= 0x400;
4209 } else {
4210 ST0 = cos(fptemp);
4211 env->fpus &= (~0x400); /* C2 <-- 0 */
4212 /* the above code is for |arg| < 2**63 only */
4216 void helper_fxam_ST0(void)
4218 CPU86_LDoubleU temp;
4219 int expdif;
4221 temp.d = ST0;
4223 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4224 if (SIGND(temp))
4225 env->fpus |= 0x200; /* C1 <-- 1 */
4227 /* XXX: test fptags too */
4228 expdif = EXPD(temp);
4229 if (expdif == MAXEXPD) {
4230 #ifdef USE_X86LDOUBLE
4231 if (MANTD(temp) == 0x8000000000000000ULL)
4232 #else
4233 if (MANTD(temp) == 0)
4234 #endif
4235 env->fpus |= 0x500 /*Infinity*/;
4236 else
4237 env->fpus |= 0x100 /*NaN*/;
4238 } else if (expdif == 0) {
4239 if (MANTD(temp) == 0)
4240 env->fpus |= 0x4000 /*Zero*/;
4241 else
4242 env->fpus |= 0x4400 /*Denormal*/;
4243 } else {
4244 env->fpus |= 0x400;
4248 void helper_fstenv(target_ulong ptr, int data32)
4250 int fpus, fptag, exp, i;
4251 uint64_t mant;
4252 CPU86_LDoubleU tmp;
4254 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4255 fptag = 0;
4256 for (i=7; i>=0; i--) {
4257 fptag <<= 2;
4258 if (env->fptags[i]) {
4259 fptag |= 3;
4260 } else {
4261 tmp.d = env->fpregs[i].d;
4262 exp = EXPD(tmp);
4263 mant = MANTD(tmp);
4264 if (exp == 0 && mant == 0) {
4265 /* zero */
4266 fptag |= 1;
4267 } else if (exp == 0 || exp == MAXEXPD
4268 #ifdef USE_X86LDOUBLE
4269 || (mant & (1LL << 63)) == 0
4270 #endif
4272 /* NaNs, infinity, denormal */
4273 fptag |= 2;
4277 if (data32) {
4278 /* 32 bit */
4279 stl(ptr, env->fpuc);
4280 stl(ptr + 4, fpus);
4281 stl(ptr + 8, fptag);
4282 stl(ptr + 12, 0); /* fpip */
4283 stl(ptr + 16, 0); /* fpcs */
4284 stl(ptr + 20, 0); /* fpoo */
4285 stl(ptr + 24, 0); /* fpos */
4286 } else {
4287 /* 16 bit */
4288 stw(ptr, env->fpuc);
4289 stw(ptr + 2, fpus);
4290 stw(ptr + 4, fptag);
4291 stw(ptr + 6, 0);
4292 stw(ptr + 8, 0);
4293 stw(ptr + 10, 0);
4294 stw(ptr + 12, 0);
4298 void helper_fldenv(target_ulong ptr, int data32)
4300 int i, fpus, fptag;
4302 if (data32) {
4303 env->fpuc = lduw(ptr);
4304 fpus = lduw(ptr + 4);
4305 fptag = lduw(ptr + 8);
4307 else {
4308 env->fpuc = lduw(ptr);
4309 fpus = lduw(ptr + 2);
4310 fptag = lduw(ptr + 4);
4312 env->fpstt = (fpus >> 11) & 7;
4313 env->fpus = fpus & ~0x3800;
4314 for(i = 0;i < 8; i++) {
4315 env->fptags[i] = ((fptag & 3) == 3);
4316 fptag >>= 2;
4320 void helper_fsave(target_ulong ptr, int data32)
4322 CPU86_LDouble tmp;
4323 int i;
4325 helper_fstenv(ptr, data32);
4327 ptr += (14 << data32);
4328 for(i = 0;i < 8; i++) {
4329 tmp = ST(i);
4330 helper_fstt(tmp, ptr);
4331 ptr += 10;
4334 /* fninit */
4335 env->fpus = 0;
4336 env->fpstt = 0;
4337 env->fpuc = 0x37f;
4338 env->fptags[0] = 1;
4339 env->fptags[1] = 1;
4340 env->fptags[2] = 1;
4341 env->fptags[3] = 1;
4342 env->fptags[4] = 1;
4343 env->fptags[5] = 1;
4344 env->fptags[6] = 1;
4345 env->fptags[7] = 1;
4348 void helper_frstor(target_ulong ptr, int data32)
4350 CPU86_LDouble tmp;
4351 int i;
4353 helper_fldenv(ptr, data32);
4354 ptr += (14 << data32);
4356 for(i = 0;i < 8; i++) {
4357 tmp = helper_fldt(ptr);
4358 ST(i) = tmp;
4359 ptr += 10;
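/* FXSAVE/FXRSTOR image layout used here: FCW at +0, FSW at +2, the
   abridged tag word at +4, MXCSR at +0x18 (only touched when
   CR4.OSFXSR is set), the eight x87 registers at +0x20 (16 bytes each)
   and the XMM registers at +0xa0. */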
4363 void helper_fxsave(target_ulong ptr, int data64)
4365 int fpus, fptag, i, nb_xmm_regs;
4366 CPU86_LDouble tmp;
4367 target_ulong addr;
4369 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4370 fptag = 0;
4371 for(i = 0; i < 8; i++) {
4372 fptag |= (env->fptags[i] << i);
4374 stw(ptr, env->fpuc);
4375 stw(ptr + 2, fpus);
4376 stw(ptr + 4, fptag ^ 0xff);
4377 #ifdef TARGET_X86_64
4378 if (data64) {
4379 stq(ptr + 0x08, 0); /* rip */
4380 stq(ptr + 0x10, 0); /* rdp */
4381 } else
4382 #endif
4384 stl(ptr + 0x08, 0); /* eip */
4385 stl(ptr + 0x0c, 0); /* sel */
4386 stl(ptr + 0x10, 0); /* dp */
4387 stl(ptr + 0x14, 0); /* sel */
4390 addr = ptr + 0x20;
4391 for(i = 0;i < 8; i++) {
4392 tmp = ST(i);
4393 helper_fstt(tmp, addr);
4394 addr += 16;
4397 if (env->cr[4] & CR4_OSFXSR_MASK) {
4398 /* XXX: finish it */
4399 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4400 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4401 if (env->hflags & HF_CS64_MASK)
4402 nb_xmm_regs = 16;
4403 else
4404 nb_xmm_regs = 8;
4405 addr = ptr + 0xa0;
4406 for(i = 0; i < nb_xmm_regs; i++) {
4407 stq(addr, env->xmm_regs[i].XMM_Q(0));
4408 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4409 addr += 16;
4414 void helper_fxrstor(target_ulong ptr, int data64)
4416 int i, fpus, fptag, nb_xmm_regs;
4417 CPU86_LDouble tmp;
4418 target_ulong addr;
4420 env->fpuc = lduw(ptr);
4421 fpus = lduw(ptr + 2);
4422 fptag = lduw(ptr + 4);
4423 env->fpstt = (fpus >> 11) & 7;
4424 env->fpus = fpus & ~0x3800;
4425 fptag ^= 0xff;
4426 for(i = 0;i < 8; i++) {
4427 env->fptags[i] = ((fptag >> i) & 1);
4430 addr = ptr + 0x20;
4431 for(i = 0;i < 8; i++) {
4432 tmp = helper_fldt(addr);
4433 ST(i) = tmp;
4434 addr += 16;
4437 if (env->cr[4] & CR4_OSFXSR_MASK) {
4438 /* XXX: finish it */
4439 env->mxcsr = ldl(ptr + 0x18);
4440 //ldl(ptr + 0x1c);
4441 if (env->hflags & HF_CS64_MASK)
4442 nb_xmm_regs = 16;
4443 else
4444 nb_xmm_regs = 8;
4445 addr = ptr + 0xa0;
4446 for(i = 0; i < nb_xmm_regs; i++) {
4447 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4448 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4449 addr += 16;
4450 }
4451 }
4452 }
4453 
4454 #ifndef USE_X86LDOUBLE
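/* conversion between the guest 80-bit extended-precision format and the
   host CPU86_LDouble representation, for hosts without a usable native
   long double (USE_X86LDOUBLE not defined) */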
4456 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4458 CPU86_LDoubleU temp;
4459 int e;
4461 temp.d = f;
4462 /* mantissa */
4463 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4464 /* exponent + sign */
4465 e = EXPD(temp) - EXPBIAS + 16383;
4466 e |= SIGND(temp) >> 16;
4467 *pexp = e;
4470 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4472 CPU86_LDoubleU temp;
4473 int e;
4474 uint64_t ll;
4476 /* XXX: handle overflow ? */
4477 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4478 e |= (upper >> 4) & 0x800; /* sign */
4479 ll = (mant >> 11) & ((1LL << 52) - 1);
4480 #ifdef __arm__
4481 temp.l.upper = (e << 20) | (ll >> 32);
4482 temp.l.lower = ll;
4483 #else
4484 temp.ll = ll | ((uint64_t)e << 52);
4485 #endif
4486 return temp.d;
4489 #else
4491 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4493 CPU86_LDoubleU temp;
4495 temp.d = f;
4496 *pmant = temp.l.lower;
4497 *pexp = temp.l.upper;
4500 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4502 CPU86_LDoubleU temp;
4504 temp.l.upper = upper;
4505 temp.l.lower = mant;
4506 return temp.d;
4508 #endif
4510 #ifdef TARGET_X86_64
4512 //#define DEBUG_MULDIV
4514 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4516 *plow += a;
4517 /* carry test */
4518 if (*plow < a)
4519 (*phigh)++;
4520 *phigh += b;
4523 static void neg128(uint64_t *plow, uint64_t *phigh)
4525 *plow = ~ *plow;
4526 *phigh = ~ *phigh;
4527 add128(plow, phigh, 1, 0);
4530 /* return TRUE if overflow */
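/* divide the 128-bit value *phigh:*plow by b with a simple shift-and-subtract
   loop: the quotient is returned in *plow and the remainder in *phigh;
   overflow means the quotient does not fit in 64 bits */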
4531 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4533 uint64_t q, r, a1, a0;
4534 int i, qb, ab;
4536 a0 = *plow;
4537 a1 = *phigh;
4538 if (a1 == 0) {
4539 q = a0 / b;
4540 r = a0 % b;
4541 *plow = q;
4542 *phigh = r;
4543 } else {
4544 if (a1 >= b)
4545 return 1;
4546 /* XXX: use a better algorithm */
4547 for(i = 0; i < 64; i++) {
4548 ab = a1 >> 63;
4549 a1 = (a1 << 1) | (a0 >> 63);
4550 if (ab || a1 >= b) {
4551 a1 -= b;
4552 qb = 1;
4553 } else {
4554 qb = 0;
4555 }
4556 a0 = (a0 << 1) | qb;
4557 }
4558 #if defined(DEBUG_MULDIV)
4559 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4560 *phigh, *plow, b, a0, a1);
4561 #endif
4562 *plow = a0;
4563 *phigh = a1;
4564 }
4565 return 0;
4566 }
4567 
4568 /* return TRUE if overflow */
4569 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4570 {
4571 int sa, sb;
4572 sa = ((int64_t)*phigh < 0);
4573 if (sa)
4574 neg128(plow, phigh);
4575 sb = (b < 0);
4576 if (sb)
4577 b = -b;
4578 if (div64(plow, phigh, b) != 0)
4579 return 1;
4580 if (sa ^ sb) {
4581 if (*plow > (1ULL << 63))
4582 return 1;
4583 *plow = - *plow;
4584 } else {
4585 if (*plow >= (1ULL << 63))
4586 return 1;
4587 }
4588 if (sa)
4589 *phigh = - *phigh;
4590 return 0;
4591 }
4592 
4593 void helper_mulq_EAX_T0(target_ulong t0)
4595 uint64_t r0, r1;
4597 mulu64(&r0, &r1, EAX, t0);
4598 EAX = r0;
4599 EDX = r1;
4600 CC_DST = r0;
4601 CC_SRC = r1;
4604 void helper_imulq_EAX_T0(target_ulong t0)
4606 uint64_t r0, r1;
4608 muls64(&r0, &r1, EAX, t0);
4609 EAX = r0;
4610 EDX = r1;
4611 CC_DST = r0;
4612 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4615 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4617 uint64_t r0, r1;
4619 muls64(&r0, &r1, t0, t1);
4620 CC_DST = r0;
4621 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4622 return r0;
4625 void helper_divq_EAX(target_ulong t0)
4627 uint64_t r0, r1;
4628 if (t0 == 0) {
4629 raise_exception(EXCP00_DIVZ);
4630 }
4631 r0 = EAX;
4632 r1 = EDX;
4633 if (div64(&r0, &r1, t0))
4634 raise_exception(EXCP00_DIVZ);
4635 EAX = r0;
4636 EDX = r1;
4639 void helper_idivq_EAX(target_ulong t0)
4641 uint64_t r0, r1;
4642 if (t0 == 0) {
4643 raise_exception(EXCP00_DIVZ);
4644 }
4645 r0 = EAX;
4646 r1 = EDX;
4647 if (idiv64(&r0, &r1, t0))
4648 raise_exception(EXCP00_DIVZ);
4649 EAX = r0;
4650 EDX = r1;
4652 #endif
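/* HLT: clear any pending interrupt-inhibit state, mark the CPU as halted
   and leave the execution loop */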
4654 static void do_hlt(void)
4656 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4657 env->halted = 1;
4658 env->exception_index = EXCP_HLT;
4659 cpu_loop_exit();
4662 void helper_hlt(int next_eip_addend)
4664 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4665 EIP += next_eip_addend;
4667 do_hlt();
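/* MONITOR/MWAIT: only the ECX == 0 forms are accepted; MONITOR does not
   record the monitored address and MWAIT degrades to HLT on a single-CPU
   configuration (see the XXX notes below) */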
4670 void helper_monitor(target_ulong ptr)
4672 if ((uint32_t)ECX != 0)
4673 raise_exception(EXCP0D_GPF);
4674 /* XXX: store address ? */
4675 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4678 void helper_mwait(int next_eip_addend)
4680 if ((uint32_t)ECX != 0)
4681 raise_exception(EXCP0D_GPF);
4682 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4683 EIP += next_eip_addend;
4685 /* XXX: not complete but not completely erroneous */
4686 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4687 /* more than one CPU: do not sleep because another CPU may
4688 wake this one */
4689 } else {
4690 do_hlt();
4691 }
4692 }
4693 
4694 void helper_debug(void)
4696 env->exception_index = EXCP_DEBUG;
4697 cpu_loop_exit();
4700 void helper_raise_interrupt(int intno, int next_eip_addend)
4702 raise_interrupt(intno, 1, 0, next_eip_addend);
4705 void helper_raise_exception(int exception_index)
4707 raise_exception(exception_index);
4710 void helper_cli(void)
4712 env->eflags &= ~IF_MASK;
4715 void helper_sti(void)
4717 env->eflags |= IF_MASK;
4720 #if 0
4721 /* vm86plus instructions */
4722 void helper_cli_vm(void)
4724 env->eflags &= ~VIF_MASK;
4727 void helper_sti_vm(void)
4729 env->eflags |= VIF_MASK;
4730 if (env->eflags & VIP_MASK) {
4731 raise_exception(EXCP0D_GPF);
4732 }
4733 }
4734 #endif
4736 void helper_set_inhibit_irq(void)
4738 env->hflags |= HF_INHIBIT_IRQ_MASK;
4741 void helper_reset_inhibit_irq(void)
4743 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4746 void helper_boundw(target_ulong a0, int v)
4748 int low, high;
4749 low = ldsw(a0);
4750 high = ldsw(a0 + 2);
4751 v = (int16_t)v;
4752 if (v < low || v > high) {
4753 raise_exception(EXCP05_BOUND);
4754 }
4755 FORCE_RET();
4758 void helper_boundl(target_ulong a0, int v)
4760 int low, high;
4761 low = ldl(a0);
4762 high = ldl(a0 + 4);
4763 if (v < low || v > high) {
4764 raise_exception(EXCP05_BOUND);
4765 }
4766 FORCE_RET();
4769 static float approx_rsqrt(float a)
4771 return 1.0 / sqrt(a);
4774 static float approx_rcp(float a)
4776 return 1.0 / a;
4779 #if !defined(CONFIG_USER_ONLY)
4781 #define MMUSUFFIX _mmu
4783 #define SHIFT 0
4784 #include "softmmu_template.h"
4786 #define SHIFT 1
4787 #include "softmmu_template.h"
4789 #define SHIFT 2
4790 #include "softmmu_template.h"
4792 #define SHIFT 3
4793 #include "softmmu_template.h"
4795 #endif
4797 /* try to fill the TLB and return an exception if error. If retaddr is
4798 NULL, it means that the function was called in C code (i.e. not
4799 from generated code or from helper.c) */
4800 /* XXX: fix it to restore all registers */
4801 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4803 TranslationBlock *tb;
4804 int ret;
4805 unsigned long pc;
4806 CPUX86State *saved_env;
4808 /* XXX: hack to restore env in all cases, even if not called from
4809 generated code */
4810 saved_env = env;
4811 env = cpu_single_env;
4813 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4814 if (ret) {
4815 if (retaddr) {
4816 /* now we have a real cpu fault */
4817 pc = (unsigned long)retaddr;
4818 tb = tb_find_pc(pc);
4819 if (tb) {
4820 /* the PC is inside the translated code. It means that we have
4821 a virtual CPU fault */
4822 cpu_restore_state(tb, env, pc, NULL);
4823 }
4824 }
4825 raise_exception_err(env->exception_index, env->error_code);
4826 }
4827 env = saved_env;
4828 }
4831 /* Secure Virtual Machine helpers */
4833 #if defined(CONFIG_USER_ONLY)
4835 void helper_vmrun(int aflag, int next_eip_addend)
4836 {
4837 }
4838 void helper_vmmcall(void)
4839 {
4840 }
4841 void helper_vmload(int aflag)
4842 {
4843 }
4844 void helper_vmsave(int aflag)
4845 {
4846 }
4847 void helper_stgi(void)
4848 {
4849 }
4850 void helper_clgi(void)
4851 {
4852 }
4853 void helper_skinit(void)
4854 {
4855 }
4856 void helper_invlpga(int aflag)
4857 {
4858 }
4859 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4860 {
4861 }
4862 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4863 {
4864 }
4865 
4866 void helper_svm_check_io(uint32_t port, uint32_t param,
4867 uint32_t next_eip_addend)
4868 {
4869 }
4870 #else
4872 static inline void svm_save_seg(target_phys_addr_t addr,
4873 const SegmentCache *sc)
4875 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4876 sc->selector);
4877 stq_phys(addr + offsetof(struct vmcb_seg, base),
4878 sc->base);
4879 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4880 sc->limit);
4881 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4882 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
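/* the attrib field of a VMCB segment stores the descriptor flags in a
   compressed form; the shifts above (and the inverse ones in svm_load_seg
   below) convert between it and the SegmentCache flags layout */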
4885 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4887 unsigned int flags;
4889 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4890 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4891 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4892 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4893 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4896 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4897 CPUState *env, int seg_reg)
4899 SegmentCache sc1, *sc = &sc1;
4900 svm_load_seg(addr, sc);
4901 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4902 sc->base, sc->limit, sc->flags);
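/* VMRUN: save the host state into the hsave area, load the guest state
   (control registers, segments, rflags, rip/rsp/rax and the intercept
   bitmaps) from the VMCB addressed by rAX, then inject a pending event
   from event_inj if one is valid */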
4905 void helper_vmrun(int aflag, int next_eip_addend)
4907 target_ulong addr;
4908 uint32_t event_inj;
4909 uint32_t int_ctl;
4911 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4913 if (aflag == 2)
4914 addr = EAX;
4915 else
4916 addr = (uint32_t)EAX;
4918 if (loglevel & CPU_LOG_TB_IN_ASM)
4919 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4921 env->vm_vmcb = addr;
4923 /* save the current CPU state in the hsave page */
4924 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4925 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4927 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4928 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4930 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4931 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4932 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4933 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4934 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4935 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4937 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4938 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4940 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4941 &env->segs[R_ES]);
4942 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4943 &env->segs[R_CS]);
4944 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4945 &env->segs[R_SS]);
4946 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4947 &env->segs[R_DS]);
4949 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4950 EIP + next_eip_addend);
4951 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4952 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4954 /* load the interception bitmaps so we do not need to access the
4955 vmcb in svm mode */
4956 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4957 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4958 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4959 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4960 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4961 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4963 /* enable intercepts */
4964 env->hflags |= HF_SVMI_MASK;
4966 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4968 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4969 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4971 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4972 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4974 /* clear exit_info_2 so we behave like the real hardware */
4975 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4977 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4978 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4979 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4980 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4981 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4982 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4983 if (int_ctl & V_INTR_MASKING_MASK) {
4984 env->v_tpr = int_ctl & V_TPR_MASK;
4985 env->hflags2 |= HF2_VINTR_MASK;
4986 if (env->eflags & IF_MASK)
4987 env->hflags2 |= HF2_HIF_MASK;
4990 cpu_load_efer(env,
4991 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4992 env->eflags = 0;
4993 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4994 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4995 CC_OP = CC_OP_EFLAGS;
4997 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4998 env, R_ES);
4999 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5000 env, R_CS);
5001 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5002 env, R_SS);
5003 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5004 env, R_DS);
5006 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5007 env->eip = EIP;
5008 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5009 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5010 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5011 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5012 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5014 /* FIXME: guest state consistency checks */
5016 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5017 case TLB_CONTROL_DO_NOTHING:
5018 break;
5019 case TLB_CONTROL_FLUSH_ALL_ASID:
5020 /* FIXME: this is not 100% correct but should work for now */
5021 tlb_flush(env, 1);
5022 break;
5025 env->hflags2 |= HF2_GIF_MASK;
5027 if (int_ctl & V_IRQ_MASK) {
5028 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5031 /* maybe we need to inject an event */
5032 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5033 if (event_inj & SVM_EVTINJ_VALID) {
5034 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5035 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5036 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5037 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
5039 if (loglevel & CPU_LOG_TB_IN_ASM)
5040 fprintf(logfile, "Injecting(%#hx): ", valid_err);
5041 /* FIXME: need to implement valid_err */
5042 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5043 case SVM_EVTINJ_TYPE_INTR:
5044 env->exception_index = vector;
5045 env->error_code = event_inj_err;
5046 env->exception_is_int = 0;
5047 env->exception_next_eip = -1;
5048 if (loglevel & CPU_LOG_TB_IN_ASM)
5049 fprintf(logfile, "INTR");
5050 /* XXX: is it always correct ? */
5051 do_interrupt(vector, 0, 0, 0, 1);
5052 break;
5053 case SVM_EVTINJ_TYPE_NMI:
5054 env->exception_index = EXCP02_NMI;
5055 env->error_code = event_inj_err;
5056 env->exception_is_int = 0;
5057 env->exception_next_eip = EIP;
5058 if (loglevel & CPU_LOG_TB_IN_ASM)
5059 fprintf(logfile, "NMI");
5060 cpu_loop_exit();
5061 break;
5062 case SVM_EVTINJ_TYPE_EXEPT:
5063 env->exception_index = vector;
5064 env->error_code = event_inj_err;
5065 env->exception_is_int = 0;
5066 env->exception_next_eip = -1;
5067 if (loglevel & CPU_LOG_TB_IN_ASM)
5068 fprintf(logfile, "EXEPT");
5069 cpu_loop_exit();
5070 break;
5071 case SVM_EVTINJ_TYPE_SOFT:
5072 env->exception_index = vector;
5073 env->error_code = event_inj_err;
5074 env->exception_is_int = 1;
5075 env->exception_next_eip = EIP;
5076 if (loglevel & CPU_LOG_TB_IN_ASM)
5077 fprintf(logfile, "SOFT");
5078 cpu_loop_exit();
5079 break;
5081 if (loglevel & CPU_LOG_TB_IN_ASM)
5082 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
5086 void helper_vmmcall(void)
5088 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5089 raise_exception(EXCP06_ILLOP);
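/* VMLOAD/VMSAVE: transfer the guest state that VMRUN does not switch
   (FS, GS, TR, LDTR, the sysenter/syscall MSRs and KernelGSbase) between
   the CPU and the VMCB addressed by rAX */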
5092 void helper_vmload(int aflag)
5094 target_ulong addr;
5095 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5097 if (aflag == 2)
5098 addr = EAX;
5099 else
5100 addr = (uint32_t)EAX;
5102 if (loglevel & CPU_LOG_TB_IN_ASM)
5103 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5104 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5105 env->segs[R_FS].base);
5107 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5108 env, R_FS);
5109 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5110 env, R_GS);
5111 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5112 &env->tr);
5113 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5114 &env->ldt);
5116 #ifdef TARGET_X86_64
5117 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5118 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5119 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5120 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5121 #endif
5122 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5123 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5124 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5125 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5128 void helper_vmsave(int aflag)
5130 target_ulong addr;
5131 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5133 if (aflag == 2)
5134 addr = EAX;
5135 else
5136 addr = (uint32_t)EAX;
5138 if (loglevel & CPU_LOG_TB_IN_ASM)
5139 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5140 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5141 env->segs[R_FS].base);
5143 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5144 &env->segs[R_FS]);
5145 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5146 &env->segs[R_GS]);
5147 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5148 &env->tr);
5149 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5150 &env->ldt);
5152 #ifdef TARGET_X86_64
5153 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5154 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5155 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5156 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5157 #endif
5158 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5159 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5160 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5161 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5164 void helper_stgi(void)
5166 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5167 env->hflags2 |= HF2_GIF_MASK;
5170 void helper_clgi(void)
5172 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5173 env->hflags2 &= ~HF2_GIF_MASK;
5176 void helper_skinit(void)
5178 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5179 /* XXX: not implemented */
5180 raise_exception(EXCP06_ILLOP);
5183 void helper_invlpga(int aflag)
5185 target_ulong addr;
5186 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5188 if (aflag == 2)
5189 addr = EAX;
5190 else
5191 addr = (uint32_t)EAX;
5193 /* XXX: could use the ASID to see if it is needed to do the
5194 flush */
5195 tlb_flush_page(env, addr);
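/* check whether the active intercept bitmaps require a #VMEXIT for the
   given exit code; MSR accesses are additionally filtered through the MSR
   permission map referenced by the VMCB */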
5198 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5200 if (likely(!(env->hflags & HF_SVMI_MASK)))
5201 return;
5202 switch(type) {
5203 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5204 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5205 helper_vmexit(type, param);
5207 break;
5208 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5209 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5210 helper_vmexit(type, param);
5212 break;
5213 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5214 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5215 helper_vmexit(type, param);
5217 break;
5218 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5219 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5220 helper_vmexit(type, param);
5222 break;
5223 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5224 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5225 helper_vmexit(type, param);
5227 break;
5228 case SVM_EXIT_MSR:
5229 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5230 /* FIXME: this should be read in at vmrun (faster this way?) */
5231 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5232 uint32_t t0, t1;
5233 switch((uint32_t)ECX) {
5234 case 0 ... 0x1fff:
5235 t0 = (ECX * 2) % 8;
5236 t1 = ECX / 8;
5237 break;
5238 case 0xc0000000 ... 0xc0001fff:
5239 t0 = (8192 + ECX - 0xc0000000) * 2;
5240 t1 = (t0 / 8);
5241 t0 %= 8;
5242 break;
5243 case 0xc0010000 ... 0xc0011fff:
5244 t0 = (16384 + ECX - 0xc0010000) * 2;
5245 t1 = (t0 / 8);
5246 t0 %= 8;
5247 break;
5248 default:
5249 helper_vmexit(type, param);
5250 t0 = 0;
5251 t1 = 0;
5252 break;
5254 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5255 helper_vmexit(type, param);
5257 break;
5258 default:
5259 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5260 helper_vmexit(type, param);
5262 break;
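/* IOIO intercept: consult the I/O permission map and, if the port is
   intercepted, record the next EIP in exit_info_2 and generate a #VMEXIT */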
5266 void helper_svm_check_io(uint32_t port, uint32_t param,
5267 uint32_t next_eip_addend)
5269 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5270 /* FIXME: this should be read in at vmrun (faster this way?) */
5271 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5272 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5273 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5274 /* next EIP */
5275 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5276 env->eip + next_eip_addend);
5277 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5278 }
5279 }
5280 }
5281 
5282 /* Note: currently only 32 bits of exit_code are used */
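/* #VMEXIT: write the guest state and the exit code back into the VMCB,
   then restore the host state saved in the hsave area by VMRUN */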
5283 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5285 uint32_t int_ctl;
5287 if (loglevel & CPU_LOG_TB_IN_ASM)
5288 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5289 exit_code, exit_info_1,
5290 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5291 EIP);
5293 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5294 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5295 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5296 } else {
5297 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5300 /* Save the VM state in the vmcb */
5301 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5302 &env->segs[R_ES]);
5303 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5304 &env->segs[R_CS]);
5305 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5306 &env->segs[R_SS]);
5307 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5308 &env->segs[R_DS]);
5310 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5311 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5314 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5316 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5317 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5318 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5319 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5320 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5322 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5323 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5324 int_ctl |= env->v_tpr & V_TPR_MASK;
5325 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5326 int_ctl |= V_IRQ_MASK;
5327 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5329 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5330 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5331 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5332 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5333 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5334 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5335 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5337 /* Reload the host state from vm_hsave */
5338 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5339 env->hflags &= ~HF_SVMI_MASK;
5340 env->intercept = 0;
5341 env->intercept_exceptions = 0;
5342 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5343 env->tsc_offset = 0;
5345 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5346 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5348 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5349 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5351 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5352 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5353 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5354 /* we need to set the efer after the crs so the hidden flags get
5355 set properly */
5356 cpu_load_efer(env,
5357 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5358 env->eflags = 0;
5359 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5360 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5361 CC_OP = CC_OP_EFLAGS;
5363 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5364 env, R_ES);
5365 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5366 env, R_CS);
5367 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5368 env, R_SS);
5369 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5370 env, R_DS);
5372 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5373 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5374 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5376 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5377 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5379 /* other setups */
5380 cpu_x86_set_cpl(env, 0);
5381 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5382 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5384 env->hflags2 &= ~HF2_GIF_MASK;
5385 /* FIXME: Resets the current ASID register to zero (host ASID). */
5387 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5389 /* Clears the TSC_OFFSET inside the processor. */
5391 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5392 from the page table indicated the host's CR3. If the PDPEs contain
5393 illegal state, the processor causes a shutdown. */
5395 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5396 env->cr[0] |= CR0_PE_MASK;
5397 env->eflags &= ~VM_MASK;
5399 /* Disables all breakpoints in the host DR7 register. */
5401 /* Checks the reloaded host state for consistency. */
5403 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5404 host's code segment or non-canonical (in the case of long mode), a
5405 #GP fault is delivered inside the host. */
5407 /* remove any pending exception */
5408 env->exception_index = -1;
5409 env->error_code = 0;
5410 env->old_exception = -1;
5412 cpu_loop_exit();
5415 #endif
5417 /* MMX/SSE */
5418 /* XXX: optimize by storing fptt and fptags in the static cpu state */
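/* the two 32-bit stores below update all eight fptags bytes at once:
   0x00 marks every register as valid for MMX use, 0x01 marks it empty */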
5419 void helper_enter_mmx(void)
5421 env->fpstt = 0;
5422 *(uint32_t *)(env->fptags) = 0;
5423 *(uint32_t *)(env->fptags + 4) = 0;
5426 void helper_emms(void)
5428 /* set to empty state */
5429 *(uint32_t *)(env->fptags) = 0x01010101;
5430 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5433 /* XXX: suppress */
5434 void helper_movq(uint64_t *d, uint64_t *s)
5436 *d = *s;
5439 #define SHIFT 0
5440 #include "ops_sse.h"
5442 #define SHIFT 1
5443 #include "ops_sse.h"
5445 #define SHIFT 0
5446 #include "helper_template.h"
5447 #undef SHIFT
5449 #define SHIFT 1
5450 #include "helper_template.h"
5451 #undef SHIFT
5453 #define SHIFT 2
5454 #include "helper_template.h"
5455 #undef SHIFT
5457 #ifdef TARGET_X86_64
5459 #define SHIFT 3
5460 #include "helper_template.h"
5461 #undef SHIFT
5463 #endif
5465 /* bit operations */
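/* BSF/BSR: scan for the lowest/highest set bit; the loops assume a non-zero
   operand, the zero case being expected to be handled by the translated
   code before these helpers are called */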
5466 target_ulong helper_bsf(target_ulong t0)
5468 int count;
5469 target_ulong res;
5471 res = t0;
5472 count = 0;
5473 while ((res & 1) == 0) {
5474 count++;
5475 res >>= 1;
5477 return count;
5480 target_ulong helper_bsr(target_ulong t0)
5482 int count;
5483 target_ulong res, mask;
5485 res = t0;
5486 count = TARGET_LONG_BITS - 1;
5487 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5488 while ((res & mask) == 0) {
5489 count--;
5490 res <<= 1;
5492 return count;
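/* lazy condition code evaluation: cc_table maps the current CC_OP to the
   functions that recompute the full eflags, or just CF, from CC_SRC/CC_DST */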
5496 static int compute_all_eflags(void)
5498 return CC_SRC;
5501 static int compute_c_eflags(void)
5503 return CC_SRC & CC_C;
5506 CCTable cc_table[CC_OP_NB] = {
5507 [CC_OP_DYNAMIC] = { /* should never happen */ },
5509 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
5511 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
5512 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
5513 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
5515 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
5516 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
5517 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
5519 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
5520 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
5521 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
5523 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
5524 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
5525 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
5527 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
5528 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
5529 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
5531 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
5532 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
5533 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
5535 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
5536 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
5537 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
5539 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
5540 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
5541 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
5543 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
5544 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
5545 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
5547 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
5548 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
5549 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
5551 #ifdef TARGET_X86_64
5552 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
5554 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
5556 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
5558 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
5560 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
5562 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
5564 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
5566 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
5568 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
5570 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
5571 #endif