[qemu-kvm/fedora.git] / target-i386 / helper.c
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21 //#define DEBUG_PCALL
23 #if 0
24 #define raise_exception_err(a, b)\
25 do {\
26 if (logfile)\
27 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
28 (raise_exception_err)(a, b);\
29 } while (0)
30 #endif
32 const uint8_t parity_table[256] = {
33 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
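/* the table above gives CC_P for byte values with an even number of set
   bits, matching the x86 parity flag definition */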
67 /* modulo 17 table */
68 const uint8_t rclw_table[32] = {
69 0, 1, 2, 3, 4, 5, 6, 7,
70 8, 9,10,11,12,13,14,15,
71 16, 0, 1, 2, 3, 4, 5, 6,
72 7, 8, 9,10,11,12,13,14,
75 /* modulo 9 table */
76 const uint8_t rclb_table[32] = {
77 0, 1, 2, 3, 4, 5, 6, 7,
78 8, 0, 1, 2, 3, 4, 5, 6,
79 7, 8, 0, 1, 2, 3, 4, 5,
80 6, 7, 8, 0, 1, 2, 3, 4,
83 const CPU86_LDouble f15rk[7] =
85 0.00000000000000000000L,
86 1.00000000000000000000L,
87 3.14159265358979323851L, /*pi*/
88 0.30102999566398119523L, /*lg2*/
89 0.69314718055994530943L, /*ln2*/
90 1.44269504088896340739L, /*l2e*/
91 3.32192809488736234781L, /*l2t*/
94 /* thread support */
96 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
98 void cpu_lock(void)
100 spin_lock(&global_cpu_lock);
103 void cpu_unlock(void)
105 spin_unlock(&global_cpu_lock);
108 /* return non-zero if error */
109 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
110 int selector)
112 SegmentCache *dt;
113 int index;
114 target_ulong ptr;
116 if (selector & 0x4)
117 dt = &env->ldt;
118 else
119 dt = &env->gdt;
120 index = selector & ~7;
121 if ((index + 7) > dt->limit)
122 return -1;
123 ptr = dt->base + index;
124 *e1_ptr = ldl_kernel(ptr);
125 *e2_ptr = ldl_kernel(ptr + 4);
126 return 0;
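/* descriptor decoding helpers: the limit is assembled from e1 bits 0-15
   and e2 bits 16-19 and, if the granularity bit is set, scaled to 4 KiB
   pages with the low 12 bits forced to one; the base is assembled from
   e1 bits 16-31, e2 bits 0-7 and e2 bits 24-31 */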
129 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
131 unsigned int limit;
132 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
133 if (e2 & DESC_G_MASK)
134 limit = (limit << 12) | 0xfff;
135 return limit;
138 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
140 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
143 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
145 sc->base = get_seg_base(e1, e2);
146 sc->limit = get_seg_limit(e1, e2);
147 sc->flags = e2;
150 /* init the segment cache in vm86 mode. */
151 static inline void load_seg_vm(int seg, int selector)
153 selector &= 0xffff;
154 cpu_x86_load_seg_cache(env, seg, selector,
155 (selector << 4), 0xffff, 0);
158 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
159 uint32_t *esp_ptr, int dpl)
161 int type, index, shift;
163 #if 0
165 int i;
166 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
167 for(i=0;i<env->tr.limit;i++) {
168 printf("%02x ", env->tr.base[i]);
169 if ((i & 7) == 7) printf("\n");
171 printf("\n");
173 #endif
175 if (!(env->tr.flags & DESC_P_MASK))
176 cpu_abort(env, "invalid tss");
177 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
178 if ((type & 7) != 1)
179 cpu_abort(env, "invalid tss type");
180 shift = type >> 3;
181 index = (dpl * 4 + 2) << shift;
182 if (index + (4 << shift) - 1 > env->tr.limit)
183 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
184 if (shift == 0) {
185 *esp_ptr = lduw_kernel(env->tr.base + index);
186 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
187 } else {
188 *esp_ptr = ldl_kernel(env->tr.base + index);
189 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
193 /* XXX: merge with load_seg() */
194 static void tss_load_seg(int seg_reg, int selector)
196 uint32_t e1, e2;
197 int rpl, dpl, cpl;
199 if ((selector & 0xfffc) != 0) {
200 if (load_segment(&e1, &e2, selector) != 0)
201 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
202 if (!(e2 & DESC_S_MASK))
203 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
204 rpl = selector & 3;
205 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
206 cpl = env->hflags & HF_CPL_MASK;
207 if (seg_reg == R_CS) {
208 if (!(e2 & DESC_CS_MASK))
209 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
210 /* XXX: is it correct ? */
211 if (dpl != rpl)
212 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
213 if ((e2 & DESC_C_MASK) && dpl > rpl)
214 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
215 } else if (seg_reg == R_SS) {
216 /* SS must be writable data */
217 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
218 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
219 if (dpl != cpl || dpl != rpl)
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 } else {
222 /* not readable code */
223 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
224 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
225 /* if data or non-conforming code, check the rights */
226 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
227 if (dpl < cpl || dpl < rpl)
228 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
231 if (!(e2 & DESC_P_MASK))
232 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
233 cpu_x86_load_seg_cache(env, seg_reg, selector,
234 get_seg_base(e1, e2),
235 get_seg_limit(e1, e2),
236 e2);
237 } else {
238 if (seg_reg == R_SS || seg_reg == R_CS)
239 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243 #define SWITCH_TSS_JMP 0
244 #define SWITCH_TSS_IRET 1
245 #define SWITCH_TSS_CALL 2
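/* hardware task switch: the outgoing register state is written back into
   the current TSS, the new TSS is read, the busy bits and the NT flag are
   updated according to whether the switch comes from a JMP, CALL or IRET,
   and finally CR3, the general registers, the LDT and the segment
   selectors of the new task are loaded */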
247 /* XXX: restore CPU state in registers (PowerPC case) */
248 static void switch_tss(int tss_selector,
249 uint32_t e1, uint32_t e2, int source,
250 uint32_t next_eip)
252 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
253 target_ulong tss_base;
254 uint32_t new_regs[8], new_segs[6];
255 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
256 uint32_t old_eflags, eflags_mask;
257 SegmentCache *dt;
258 int index;
259 target_ulong ptr;
261 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
262 #ifdef DEBUG_PCALL
263 if (loglevel & CPU_LOG_PCALL)
264 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
265 #endif
267 /* if task gate, we read the TSS segment and we load it */
268 if (type == 5) {
269 if (!(e2 & DESC_P_MASK))
270 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
271 tss_selector = e1 >> 16;
272 if (tss_selector & 4)
273 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
274 if (load_segment(&e1, &e2, tss_selector) != 0)
275 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
276 if (e2 & DESC_S_MASK)
277 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
278 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
279 if ((type & 7) != 1)
280 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
283 if (!(e2 & DESC_P_MASK))
284 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
286 if (type & 8)
287 tss_limit_max = 103;
288 else
289 tss_limit_max = 43;
290 tss_limit = get_seg_limit(e1, e2);
291 tss_base = get_seg_base(e1, e2);
292 if ((tss_selector & 4) != 0 ||
293 tss_limit < tss_limit_max)
294 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
295 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
296 if (old_type & 8)
297 old_tss_limit_max = 103;
298 else
299 old_tss_limit_max = 43;
301 /* read all the registers from the new TSS */
302 if (type & 8) {
303 /* 32 bit */
304 new_cr3 = ldl_kernel(tss_base + 0x1c);
305 new_eip = ldl_kernel(tss_base + 0x20);
306 new_eflags = ldl_kernel(tss_base + 0x24);
307 for(i = 0; i < 8; i++)
308 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
309 for(i = 0; i < 6; i++)
310 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
311 new_ldt = lduw_kernel(tss_base + 0x60);
312 new_trap = ldl_kernel(tss_base + 0x64);
313 } else {
314 /* 16 bit */
315 new_cr3 = 0;
316 new_eip = lduw_kernel(tss_base + 0x0e);
317 new_eflags = lduw_kernel(tss_base + 0x10);
318 for(i = 0; i < 8; i++)
319 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
320 for(i = 0; i < 4; i++)
321 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
322 new_ldt = lduw_kernel(tss_base + 0x2a);
323 new_segs[R_FS] = 0;
324 new_segs[R_GS] = 0;
325 new_trap = 0;
328 /* NOTE: we must avoid memory exceptions during the task switch,
329 so we make dummy accesses beforehand */
330 /* XXX: it can still fail in some cases, so a bigger hack is
331 necessary to validate the TLB after having done the accesses */
333 v1 = ldub_kernel(env->tr.base);
334 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
335 stb_kernel(env->tr.base, v1);
336 stb_kernel(env->tr.base + old_tss_limit_max, v2);
338 /* clear busy bit (it is restartable) */
339 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
340 target_ulong ptr;
341 uint32_t e2;
342 ptr = env->gdt.base + (env->tr.selector & ~7);
343 e2 = ldl_kernel(ptr + 4);
344 e2 &= ~DESC_TSS_BUSY_MASK;
345 stl_kernel(ptr + 4, e2);
347 old_eflags = compute_eflags();
348 if (source == SWITCH_TSS_IRET)
349 old_eflags &= ~NT_MASK;
351 /* save the current state in the old TSS */
352 if (type & 8) {
353 /* 32 bit */
354 stl_kernel(env->tr.base + 0x20, next_eip);
355 stl_kernel(env->tr.base + 0x24, old_eflags);
356 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
357 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
358 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
359 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
360 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
361 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
362 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
363 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
364 for(i = 0; i < 6; i++)
365 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
366 } else {
367 /* 16 bit */
368 stw_kernel(env->tr.base + 0x0e, next_eip);
369 stw_kernel(env->tr.base + 0x10, old_eflags);
370 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
371 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
372 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
373 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
374 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
375 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
376 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
377 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
378 for(i = 0; i < 4; i++)
379 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
382 /* now if an exception occurs, it will occur in the next task's
383 context */
385 if (source == SWITCH_TSS_CALL) {
386 stw_kernel(tss_base, env->tr.selector);
387 new_eflags |= NT_MASK;
390 /* set busy bit */
391 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
392 target_ulong ptr;
393 uint32_t e2;
394 ptr = env->gdt.base + (tss_selector & ~7);
395 e2 = ldl_kernel(ptr + 4);
396 e2 |= DESC_TSS_BUSY_MASK;
397 stl_kernel(ptr + 4, e2);
400 /* set the new CPU state */
401 /* from this point, any exception which occurs can give problems */
402 env->cr[0] |= CR0_TS_MASK;
403 env->hflags |= HF_TS_MASK;
404 env->tr.selector = tss_selector;
405 env->tr.base = tss_base;
406 env->tr.limit = tss_limit;
407 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
409 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
410 cpu_x86_update_cr3(env, new_cr3);
413 /* load all registers without raising an exception, then reload them
414 with possible exceptions */
415 env->eip = new_eip;
416 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
417 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
418 if (!(type & 8))
419 eflags_mask &= 0xffff;
420 load_eflags(new_eflags, eflags_mask);
421 /* XXX: what to do in 16 bit case ? */
422 EAX = new_regs[0];
423 ECX = new_regs[1];
424 EDX = new_regs[2];
425 EBX = new_regs[3];
426 ESP = new_regs[4];
427 EBP = new_regs[5];
428 ESI = new_regs[6];
429 EDI = new_regs[7];
430 if (new_eflags & VM_MASK) {
431 for(i = 0; i < 6; i++)
432 load_seg_vm(i, new_segs[i]);
433 /* in vm86, CPL is always 3 */
434 cpu_x86_set_cpl(env, 3);
435 } else {
436 /* CPL is set to the RPL of CS */
437 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
438 /* load just the selectors first, as the rest may trigger exceptions */
439 for(i = 0; i < 6; i++)
440 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
443 env->ldt.selector = new_ldt & ~4;
444 env->ldt.base = 0;
445 env->ldt.limit = 0;
446 env->ldt.flags = 0;
448 /* load the LDT */
449 if (new_ldt & 4)
450 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
452 if ((new_ldt & 0xfffc) != 0) {
453 dt = &env->gdt;
454 index = new_ldt & ~7;
455 if ((index + 7) > dt->limit)
456 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
457 ptr = dt->base + index;
458 e1 = ldl_kernel(ptr);
459 e2 = ldl_kernel(ptr + 4);
460 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
461 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
462 if (!(e2 & DESC_P_MASK))
463 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
464 load_seg_cache_raw_dt(&env->ldt, e1, e2);
467 /* load the segments */
468 if (!(new_eflags & VM_MASK)) {
469 tss_load_seg(R_CS, new_segs[R_CS]);
470 tss_load_seg(R_SS, new_segs[R_SS]);
471 tss_load_seg(R_ES, new_segs[R_ES]);
472 tss_load_seg(R_DS, new_segs[R_DS]);
473 tss_load_seg(R_FS, new_segs[R_FS]);
474 tss_load_seg(R_GS, new_segs[R_GS]);
477 /* check that EIP is within the CS segment limit */
478 if (new_eip > env->segs[R_CS].limit) {
479 /* XXX: different exception if CALL ? */
480 raise_exception_err(EXCP0D_GPF, 0);
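/* the I/O permission bitmap starts at the 16-bit offset stored at byte
   0x66 of the 32-bit TSS; each bit covers one port and every bit covered
   by the access must be clear, two bytes being read so that accesses
   crossing a byte boundary are handled */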
484 /* check if Port I/O is allowed in TSS */
485 static inline void check_io(int addr, int size)
487 int io_offset, val, mask;
489 /* TSS must be a valid 32 bit one */
490 if (!(env->tr.flags & DESC_P_MASK) ||
491 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
492 env->tr.limit < 103)
493 goto fail;
494 io_offset = lduw_kernel(env->tr.base + 0x66);
495 io_offset += (addr >> 3);
496 /* Note: the check needs two bytes */
497 if ((io_offset + 1) > env->tr.limit)
498 goto fail;
499 val = lduw_kernel(env->tr.base + io_offset);
500 val >>= (addr & 7);
501 mask = (1 << size) - 1;
502 /* all bits must be zero to allow the I/O */
503 if ((val & mask) != 0) {
504 fail:
505 raise_exception_err(EXCP0D_GPF, 0);
509 void check_iob_T0(void)
511 check_io(T0, 1);
514 void check_iow_T0(void)
516 check_io(T0, 2);
519 void check_iol_T0(void)
521 check_io(T0, 4);
524 void check_iob_DX(void)
526 check_io(EDX & 0xffff, 1);
529 void check_iow_DX(void)
531 check_io(EDX & 0xffff, 2);
534 void check_iol_DX(void)
536 check_io(EDX & 0xffff, 4);
539 static inline unsigned int get_sp_mask(unsigned int e2)
541 if (e2 & DESC_B_MASK)
542 return 0xffffffff;
543 else
544 return 0xffff;
547 #ifdef TARGET_X86_64
548 #define SET_ESP(val, sp_mask)\
549 do {\
550 if ((sp_mask) == 0xffff)\
551 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
552 else if ((sp_mask) == 0xffffffffLL)\
553 ESP = (uint32_t)(val);\
554 else\
555 ESP = (val);\
556 } while (0)
557 #else
558 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
559 #endif
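/* on x86_64 a 32-bit stack pointer update must zero-extend into RSP while
   a 16-bit update only replaces the low word, hence the three cases in the
   64-bit variant of SET_ESP above */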
561 /* XXX: add an is_user flag to have proper security support */
562 #define PUSHW(ssp, sp, sp_mask, val)\
564 sp -= 2;\
565 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
568 #define PUSHL(ssp, sp, sp_mask, val)\
570 sp -= 4;\
571 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
574 #define POPW(ssp, sp, sp_mask, val)\
576 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
577 sp += 2;\
580 #define POPL(ssp, sp, sp_mask, val)\
582 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
583 sp += 4;\
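/* protected mode interrupt/exception delivery: the gate is fetched from
   the IDT, its type and DPL are checked and, if the handler runs at a more
   privileged level, a new SS:ESP is taken from the TSS before EFLAGS, CS,
   EIP and an optional error code are pushed (plus the data segment
   selectors when interrupting VM86 code) */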
586 /* protected mode interrupt */
587 static void do_interrupt_protected(int intno, int is_int, int error_code,
588 unsigned int next_eip, int is_hw)
590 SegmentCache *dt;
591 target_ulong ptr, ssp;
592 int type, dpl, selector, ss_dpl, cpl;
593 int has_error_code, new_stack, shift;
594 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
595 uint32_t old_eip, sp_mask;
596 int svm_should_check = 1;
598 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
599 next_eip = EIP;
600 svm_should_check = 0;
603 if (svm_should_check
604 && (INTERCEPTEDl(_exceptions, 1 << intno)
605 && !is_int)) {
606 raise_interrupt(intno, is_int, error_code, 0);
608 has_error_code = 0;
609 if (!is_int && !is_hw) {
610 switch(intno) {
611 case 8:
612 case 10:
613 case 11:
614 case 12:
615 case 13:
616 case 14:
617 case 17:
618 has_error_code = 1;
619 break;
622 if (is_int)
623 old_eip = next_eip;
624 else
625 old_eip = env->eip;
627 dt = &env->idt;
628 if (intno * 8 + 7 > dt->limit)
629 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
630 ptr = dt->base + intno * 8;
631 e1 = ldl_kernel(ptr);
632 e2 = ldl_kernel(ptr + 4);
633 /* check gate type */
634 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
635 switch(type) {
636 case 5: /* task gate */
637 /* must do that check here to return the correct error code */
638 if (!(e2 & DESC_P_MASK))
639 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
640 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
641 if (has_error_code) {
642 int type;
643 uint32_t mask;
644 /* push the error code */
645 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
646 shift = type >> 3;
647 if (env->segs[R_SS].flags & DESC_B_MASK)
648 mask = 0xffffffff;
649 else
650 mask = 0xffff;
651 esp = (ESP - (2 << shift)) & mask;
652 ssp = env->segs[R_SS].base + esp;
653 if (shift)
654 stl_kernel(ssp, error_code);
655 else
656 stw_kernel(ssp, error_code);
657 SET_ESP(esp, mask);
659 return;
660 case 6: /* 286 interrupt gate */
661 case 7: /* 286 trap gate */
662 case 14: /* 386 interrupt gate */
663 case 15: /* 386 trap gate */
664 break;
665 default:
666 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
667 break;
669 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
670 cpl = env->hflags & HF_CPL_MASK;
671 /* check privilege if software int */
672 if (is_int && dpl < cpl)
673 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
674 /* check valid bit */
675 if (!(e2 & DESC_P_MASK))
676 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
677 selector = e1 >> 16;
678 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
679 if ((selector & 0xfffc) == 0)
680 raise_exception_err(EXCP0D_GPF, 0);
682 if (load_segment(&e1, &e2, selector) != 0)
683 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
684 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
687 if (dpl > cpl)
688 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
689 if (!(e2 & DESC_P_MASK))
690 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
691 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
692 /* to inner privilege */
693 get_ss_esp_from_tss(&ss, &esp, dpl);
694 if ((ss & 0xfffc) == 0)
695 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
696 if ((ss & 3) != dpl)
697 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
698 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
699 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
700 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
701 if (ss_dpl != dpl)
702 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
703 if (!(ss_e2 & DESC_S_MASK) ||
704 (ss_e2 & DESC_CS_MASK) ||
705 !(ss_e2 & DESC_W_MASK))
706 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
707 if (!(ss_e2 & DESC_P_MASK))
708 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
709 new_stack = 1;
710 sp_mask = get_sp_mask(ss_e2);
711 ssp = get_seg_base(ss_e1, ss_e2);
712 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
713 /* to same privilege */
714 if (env->eflags & VM_MASK)
715 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
716 new_stack = 0;
717 sp_mask = get_sp_mask(env->segs[R_SS].flags);
718 ssp = env->segs[R_SS].base;
719 esp = ESP;
720 dpl = cpl;
721 } else {
722 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
723 new_stack = 0; /* avoid warning */
724 sp_mask = 0; /* avoid warning */
725 ssp = 0; /* avoid warning */
726 esp = 0; /* avoid warning */
729 shift = type >> 3;
731 #if 0
732 /* XXX: check that enough room is available */
733 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
734 if (env->eflags & VM_MASK)
735 push_size += 8;
736 push_size <<= shift;
737 #endif
738 if (shift == 1) {
739 if (new_stack) {
740 if (env->eflags & VM_MASK) {
741 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
742 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
743 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
744 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
746 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
747 PUSHL(ssp, esp, sp_mask, ESP);
749 PUSHL(ssp, esp, sp_mask, compute_eflags());
750 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
751 PUSHL(ssp, esp, sp_mask, old_eip);
752 if (has_error_code) {
753 PUSHL(ssp, esp, sp_mask, error_code);
755 } else {
756 if (new_stack) {
757 if (env->eflags & VM_MASK) {
758 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
759 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
760 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
761 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
763 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
764 PUSHW(ssp, esp, sp_mask, ESP);
766 PUSHW(ssp, esp, sp_mask, compute_eflags());
767 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
768 PUSHW(ssp, esp, sp_mask, old_eip);
769 if (has_error_code) {
770 PUSHW(ssp, esp, sp_mask, error_code);
774 if (new_stack) {
775 if (env->eflags & VM_MASK) {
776 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
777 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
778 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
781 ss = (ss & ~3) | dpl;
782 cpu_x86_load_seg_cache(env, R_SS, ss,
783 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
785 SET_ESP(esp, sp_mask);
787 selector = (selector & ~3) | dpl;
788 cpu_x86_load_seg_cache(env, R_CS, selector,
789 get_seg_base(e1, e2),
790 get_seg_limit(e1, e2),
791 e2);
792 cpu_x86_set_cpl(env, dpl);
793 env->eip = offset;
795 /* an interrupt gate clears the IF flag */
796 if ((type & 1) == 0) {
797 env->eflags &= ~IF_MASK;
799 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
802 #ifdef TARGET_X86_64
804 #define PUSHQ(sp, val)\
806 sp -= 8;\
807 stq_kernel(sp, (val));\
810 #define POPQ(sp, val)\
812 val = ldq_kernel(sp);\
813 sp += 8;\
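/* in 64-bit mode the TSS no longer holds segment state; it only provides
   the RSP values for privilege levels 0-2 (at offset 4 + 8 * level) and
   the seven IST stack pointers used below */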
816 static inline target_ulong get_rsp_from_tss(int level)
818 int index;
820 #if 0
821 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
822 env->tr.base, env->tr.limit);
823 #endif
825 if (!(env->tr.flags & DESC_P_MASK))
826 cpu_abort(env, "invalid tss");
827 index = 8 * level + 4;
828 if ((index + 7) > env->tr.limit)
829 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
830 return ldq_kernel(env->tr.base + index);
833 /* 64 bit interrupt */
834 static void do_interrupt64(int intno, int is_int, int error_code,
835 target_ulong next_eip, int is_hw)
837 SegmentCache *dt;
838 target_ulong ptr;
839 int type, dpl, selector, cpl, ist;
840 int has_error_code, new_stack;
841 uint32_t e1, e2, e3, ss;
842 target_ulong old_eip, esp, offset;
843 int svm_should_check = 1;
845 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
846 next_eip = EIP;
847 svm_should_check = 0;
849 if (svm_should_check
850 && INTERCEPTEDl(_exceptions, 1 << intno)
851 && !is_int) {
852 raise_interrupt(intno, is_int, error_code, 0);
854 has_error_code = 0;
855 if (!is_int && !is_hw) {
856 switch(intno) {
857 case 8:
858 case 10:
859 case 11:
860 case 12:
861 case 13:
862 case 14:
863 case 17:
864 has_error_code = 1;
865 break;
868 if (is_int)
869 old_eip = next_eip;
870 else
871 old_eip = env->eip;
873 dt = &env->idt;
874 if (intno * 16 + 15 > dt->limit)
875 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
876 ptr = dt->base + intno * 16;
877 e1 = ldl_kernel(ptr);
878 e2 = ldl_kernel(ptr + 4);
879 e3 = ldl_kernel(ptr + 8);
880 /* check gate type */
881 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
882 switch(type) {
883 case 14: /* 386 interrupt gate */
884 case 15: /* 386 trap gate */
885 break;
886 default:
887 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
888 break;
890 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
891 cpl = env->hflags & HF_CPL_MASK;
892 /* check privilege if software int */
893 if (is_int && dpl < cpl)
894 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
895 /* check valid bit */
896 if (!(e2 & DESC_P_MASK))
897 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
898 selector = e1 >> 16;
899 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
900 ist = e2 & 7;
901 if ((selector & 0xfffc) == 0)
902 raise_exception_err(EXCP0D_GPF, 0);
904 if (load_segment(&e1, &e2, selector) != 0)
905 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
906 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
909 if (dpl > cpl)
910 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
911 if (!(e2 & DESC_P_MASK))
912 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
913 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
914 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
915 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
916 /* to inner privilege */
917 if (ist != 0)
918 esp = get_rsp_from_tss(ist + 3);
919 else
920 esp = get_rsp_from_tss(dpl);
921 esp &= ~0xfLL; /* align stack */
922 ss = 0;
923 new_stack = 1;
924 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
925 /* to same privilege */
926 if (env->eflags & VM_MASK)
927 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
928 new_stack = 0;
929 if (ist != 0)
930 esp = get_rsp_from_tss(ist + 3);
931 else
932 esp = ESP;
933 esp &= ~0xfLL; /* align stack */
934 dpl = cpl;
935 } else {
936 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
937 new_stack = 0; /* avoid warning */
938 esp = 0; /* avoid warning */
941 PUSHQ(esp, env->segs[R_SS].selector);
942 PUSHQ(esp, ESP);
943 PUSHQ(esp, compute_eflags());
944 PUSHQ(esp, env->segs[R_CS].selector);
945 PUSHQ(esp, old_eip);
946 if (has_error_code) {
947 PUSHQ(esp, error_code);
950 if (new_stack) {
951 ss = 0 | dpl;
952 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
954 ESP = esp;
956 selector = (selector & ~3) | dpl;
957 cpu_x86_load_seg_cache(env, R_CS, selector,
958 get_seg_base(e1, e2),
959 get_seg_limit(e1, e2),
960 e2);
961 cpu_x86_set_cpl(env, dpl);
962 env->eip = offset;
964 /* an interrupt gate clears the IF flag */
965 if ((type & 1) == 0) {
966 env->eflags &= ~IF_MASK;
968 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
970 #endif
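/* SYSCALL: the new CS/SS selectors come from MSR_STAR bits 32-47, the
   return RIP is saved in RCX and the flags in R11, RFLAGS is masked with
   env->fmask (the SYSCALL flag mask MSR) and execution continues at LSTAR
   (64-bit code) or CSTAR (compatibility mode); in legacy mode only ECX is
   saved and the target is the low 32 bits of MSR_STAR */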
972 void helper_syscall(int next_eip_addend)
974 int selector;
976 if (!(env->efer & MSR_EFER_SCE)) {
977 raise_exception_err(EXCP06_ILLOP, 0);
979 selector = (env->star >> 32) & 0xffff;
980 #ifdef TARGET_X86_64
981 if (env->hflags & HF_LMA_MASK) {
982 int code64;
984 ECX = env->eip + next_eip_addend;
985 env->regs[11] = compute_eflags();
987 code64 = env->hflags & HF_CS64_MASK;
989 cpu_x86_set_cpl(env, 0);
990 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
991 0, 0xffffffff,
992 DESC_G_MASK | DESC_P_MASK |
993 DESC_S_MASK |
994 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
995 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
996 0, 0xffffffff,
997 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
998 DESC_S_MASK |
999 DESC_W_MASK | DESC_A_MASK);
1000 env->eflags &= ~env->fmask;
1001 if (code64)
1002 env->eip = env->lstar;
1003 else
1004 env->eip = env->cstar;
1005 } else
1006 #endif
1008 ECX = (uint32_t)(env->eip + next_eip_addend);
1010 cpu_x86_set_cpl(env, 0);
1011 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1012 0, 0xffffffff,
1013 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1014 DESC_S_MASK |
1015 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1016 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1017 0, 0xffffffff,
1018 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1019 DESC_S_MASK |
1020 DESC_W_MASK | DESC_A_MASK);
1021 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1022 env->eip = (uint32_t)env->star;
1026 void helper_sysret(int dflag)
1028 int cpl, selector;
1030 if (!(env->efer & MSR_EFER_SCE)) {
1031 raise_exception_err(EXCP06_ILLOP, 0);
1033 cpl = env->hflags & HF_CPL_MASK;
1034 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1035 raise_exception_err(EXCP0D_GPF, 0);
1037 selector = (env->star >> 48) & 0xffff;
1038 #ifdef TARGET_X86_64
1039 if (env->hflags & HF_LMA_MASK) {
1040 if (dflag == 2) {
1041 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1042 0, 0xffffffff,
1043 DESC_G_MASK | DESC_P_MASK |
1044 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1045 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1046 DESC_L_MASK);
1047 env->eip = ECX;
1048 } else {
1049 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1050 0, 0xffffffff,
1051 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1052 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1053 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1054 env->eip = (uint32_t)ECX;
1056 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1057 0, 0xffffffff,
1058 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1059 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1060 DESC_W_MASK | DESC_A_MASK);
1061 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1062 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1063 cpu_x86_set_cpl(env, 3);
1064 } else
1065 #endif
1067 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1068 0, 0xffffffff,
1069 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1070 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1071 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1072 env->eip = (uint32_t)ECX;
1073 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1074 0, 0xffffffff,
1075 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1076 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1077 DESC_W_MASK | DESC_A_MASK);
1078 env->eflags |= IF_MASK;
1079 cpu_x86_set_cpl(env, 3);
1081 #ifdef USE_KQEMU
1082 if (kqemu_is_ok(env)) {
1083 if (env->hflags & HF_LMA_MASK)
1084 CC_OP = CC_OP_EFLAGS;
1085 env->exception_index = -1;
1086 cpu_loop_exit();
1088 #endif
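/* real mode interrupts use the 4-byte IVT entries at idt.base: a 16-bit
   offset followed by a 16-bit segment; only FLAGS, CS and IP are pushed
   and no privilege checks apply */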
1091 /* real mode interrupt */
1092 static void do_interrupt_real(int intno, int is_int, int error_code,
1093 unsigned int next_eip)
1095 SegmentCache *dt;
1096 target_ulong ptr, ssp;
1097 int selector;
1098 uint32_t offset, esp;
1099 uint32_t old_cs, old_eip;
1100 int svm_should_check = 1;
1102 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1103 next_eip = EIP;
1104 svm_should_check = 0;
1106 if (svm_should_check
1107 && INTERCEPTEDl(_exceptions, 1 << intno)
1108 && !is_int) {
1109 raise_interrupt(intno, is_int, error_code, 0);
1111 /* real mode (simpler !) */
1112 dt = &env->idt;
1113 if (intno * 4 + 3 > dt->limit)
1114 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1115 ptr = dt->base + intno * 4;
1116 offset = lduw_kernel(ptr);
1117 selector = lduw_kernel(ptr + 2);
1118 esp = ESP;
1119 ssp = env->segs[R_SS].base;
1120 if (is_int)
1121 old_eip = next_eip;
1122 else
1123 old_eip = env->eip;
1124 old_cs = env->segs[R_CS].selector;
1125 /* XXX: use SS segment size ? */
1126 PUSHW(ssp, esp, 0xffff, compute_eflags());
1127 PUSHW(ssp, esp, 0xffff, old_cs);
1128 PUSHW(ssp, esp, 0xffff, old_eip);
1130 /* update processor state */
1131 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1132 env->eip = offset;
1133 env->segs[R_CS].selector = selector;
1134 env->segs[R_CS].base = (selector << 4);
1135 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1138 /* fake user mode interrupt */
1139 void do_interrupt_user(int intno, int is_int, int error_code,
1140 target_ulong next_eip)
1142 SegmentCache *dt;
1143 target_ulong ptr;
1144 int dpl, cpl;
1145 uint32_t e2;
1147 dt = &env->idt;
1148 ptr = dt->base + (intno * 8);
1149 e2 = ldl_kernel(ptr + 4);
1151 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1152 cpl = env->hflags & HF_CPL_MASK;
1153 /* check privilege if software int */
1154 if (is_int && dpl < cpl)
1155 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1157 /* Since we emulate only user space, we cannot do more than
1158 exit the emulation with the suitable exception and error
1159 code */
1160 if (is_int)
1161 EIP = next_eip;
1165 * Begin execution of an interrupt. is_int is TRUE if coming from
1166 * the int instruction. next_eip is the EIP value AFTER the interrupt
1167 * instruction. It is only relevant if is_int is TRUE.
1169 void do_interrupt(int intno, int is_int, int error_code,
1170 target_ulong next_eip, int is_hw)
1172 if (loglevel & CPU_LOG_INT) {
1173 if ((env->cr[0] & CR0_PE_MASK)) {
1174 static int count;
1175 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1176 count, intno, error_code, is_int,
1177 env->hflags & HF_CPL_MASK,
1178 env->segs[R_CS].selector, EIP,
1179 (int)env->segs[R_CS].base + EIP,
1180 env->segs[R_SS].selector, ESP);
1181 if (intno == 0x0e) {
1182 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1183 } else {
1184 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1186 fprintf(logfile, "\n");
1187 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1188 #if 0
1190 int i;
1191 uint8_t *ptr;
1192 fprintf(logfile, " code=");
1193 ptr = env->segs[R_CS].base + env->eip;
1194 for(i = 0; i < 16; i++) {
1195 fprintf(logfile, " %02x", ldub(ptr + i));
1197 fprintf(logfile, "\n");
1199 #endif
1200 count++;
1203 if (env->cr[0] & CR0_PE_MASK) {
1204 #if TARGET_X86_64
1205 if (env->hflags & HF_LMA_MASK) {
1206 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1207 } else
1208 #endif
1210 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1212 } else {
1213 do_interrupt_real(intno, is_int, error_code, next_eip);
1218 * Check nested exceptions and change to double or triple fault if
1219 * needed. It should only be called if this is not an interrupt.
1220 * Returns the new exception number.
1222 int check_exception(int intno, int *error_code)
1224 char first_contributory = env->old_exception == 0 ||
1225 (env->old_exception >= 10 &&
1226 env->old_exception <= 13);
1227 char second_contributory = intno == 0 ||
1228 (intno >= 10 && intno <= 13);
1230 if (loglevel & CPU_LOG_INT)
1231 fprintf(logfile, "check_exception old: %x new %x\n",
1232 env->old_exception, intno);
1234 if (env->old_exception == EXCP08_DBLE)
1235 cpu_abort(env, "triple fault");
1237 if ((first_contributory && second_contributory)
1238 || (env->old_exception == EXCP0E_PAGE &&
1239 (second_contributory || (intno == EXCP0E_PAGE)))) {
1240 intno = EXCP08_DBLE;
1241 *error_code = 0;
1244 if (second_contributory || (intno == EXCP0E_PAGE) ||
1245 (intno == EXCP08_DBLE))
1246 env->old_exception = intno;
1248 return intno;
1252 * Signal an interrupt. It is executed in the main CPU loop.
1253 * is_int is TRUE if coming from the int instruction. next_eip is the
1254 * EIP value AFTER the interrupt instruction. It is only relevant if
1255 * is_int is TRUE.
1257 void raise_interrupt(int intno, int is_int, int error_code,
1258 int next_eip_addend)
1260 if (!is_int) {
1261 svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1262 intno = check_exception(intno, &error_code);
1265 env->exception_index = intno;
1266 env->error_code = error_code;
1267 env->exception_is_int = is_int;
1268 env->exception_next_eip = env->eip + next_eip_addend;
1269 cpu_loop_exit();
1272 /* same as raise_exception_err, but do not restore global registers */
1273 static void raise_exception_err_norestore(int exception_index, int error_code)
1275 exception_index = check_exception(exception_index, &error_code);
1277 env->exception_index = exception_index;
1278 env->error_code = error_code;
1279 env->exception_is_int = 0;
1280 env->exception_next_eip = 0;
1281 longjmp(env->jmp_env, 1);
1284 /* shortcuts to generate exceptions */
1286 void (raise_exception_err)(int exception_index, int error_code)
1288 raise_interrupt(exception_index, 0, error_code, 0);
1291 void raise_exception(int exception_index)
1293 raise_interrupt(exception_index, 0, 0, 0);
1296 /* SMM support */
1298 #if defined(CONFIG_USER_ONLY)
1300 void do_smm_enter(void)
1304 void helper_rsm(void)
1308 #else
1310 #ifdef TARGET_X86_64
1311 #define SMM_REVISION_ID 0x00020064
1312 #else
1313 #define SMM_REVISION_ID 0x00020000
1314 #endif
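/* on SMI entry the CPU state is saved in the SMRAM state save area at
   smbase + 0x8000 and execution restarts in a real-mode-like environment
   with CS base = smbase and EIP = 0x8000; the field offsets below follow
   the AMD64 save state layout when TARGET_X86_64 is set and the legacy
   32-bit layout otherwise */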
1316 void do_smm_enter(void)
1318 target_ulong sm_state;
1319 SegmentCache *dt;
1320 int i, offset;
1322 if (loglevel & CPU_LOG_INT) {
1323 fprintf(logfile, "SMM: enter\n");
1324 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1327 env->hflags |= HF_SMM_MASK;
1328 cpu_smm_update(env);
1330 sm_state = env->smbase + 0x8000;
1332 #ifdef TARGET_X86_64
1333 for(i = 0; i < 6; i++) {
1334 dt = &env->segs[i];
1335 offset = 0x7e00 + i * 16;
1336 stw_phys(sm_state + offset, dt->selector);
1337 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1338 stl_phys(sm_state + offset + 4, dt->limit);
1339 stq_phys(sm_state + offset + 8, dt->base);
1342 stq_phys(sm_state + 0x7e68, env->gdt.base);
1343 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1345 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1346 stq_phys(sm_state + 0x7e78, env->ldt.base);
1347 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1348 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1350 stq_phys(sm_state + 0x7e88, env->idt.base);
1351 stl_phys(sm_state + 0x7e84, env->idt.limit);
1353 stw_phys(sm_state + 0x7e90, env->tr.selector);
1354 stq_phys(sm_state + 0x7e98, env->tr.base);
1355 stl_phys(sm_state + 0x7e94, env->tr.limit);
1356 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1358 stq_phys(sm_state + 0x7ed0, env->efer);
1360 stq_phys(sm_state + 0x7ff8, EAX);
1361 stq_phys(sm_state + 0x7ff0, ECX);
1362 stq_phys(sm_state + 0x7fe8, EDX);
1363 stq_phys(sm_state + 0x7fe0, EBX);
1364 stq_phys(sm_state + 0x7fd8, ESP);
1365 stq_phys(sm_state + 0x7fd0, EBP);
1366 stq_phys(sm_state + 0x7fc8, ESI);
1367 stq_phys(sm_state + 0x7fc0, EDI);
1368 for(i = 8; i < 16; i++)
1369 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1370 stq_phys(sm_state + 0x7f78, env->eip);
1371 stl_phys(sm_state + 0x7f70, compute_eflags());
1372 stl_phys(sm_state + 0x7f68, env->dr[6]);
1373 stl_phys(sm_state + 0x7f60, env->dr[7]);
1375 stl_phys(sm_state + 0x7f48, env->cr[4]);
1376 stl_phys(sm_state + 0x7f50, env->cr[3]);
1377 stl_phys(sm_state + 0x7f58, env->cr[0]);
1379 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1380 stl_phys(sm_state + 0x7f00, env->smbase);
1381 #else
1382 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1383 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1384 stl_phys(sm_state + 0x7ff4, compute_eflags());
1385 stl_phys(sm_state + 0x7ff0, env->eip);
1386 stl_phys(sm_state + 0x7fec, EDI);
1387 stl_phys(sm_state + 0x7fe8, ESI);
1388 stl_phys(sm_state + 0x7fe4, EBP);
1389 stl_phys(sm_state + 0x7fe0, ESP);
1390 stl_phys(sm_state + 0x7fdc, EBX);
1391 stl_phys(sm_state + 0x7fd8, EDX);
1392 stl_phys(sm_state + 0x7fd4, ECX);
1393 stl_phys(sm_state + 0x7fd0, EAX);
1394 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1395 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1397 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1398 stl_phys(sm_state + 0x7f64, env->tr.base);
1399 stl_phys(sm_state + 0x7f60, env->tr.limit);
1400 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1402 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1403 stl_phys(sm_state + 0x7f80, env->ldt.base);
1404 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1405 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1407 stl_phys(sm_state + 0x7f74, env->gdt.base);
1408 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1410 stl_phys(sm_state + 0x7f58, env->idt.base);
1411 stl_phys(sm_state + 0x7f54, env->idt.limit);
1413 for(i = 0; i < 6; i++) {
1414 dt = &env->segs[i];
1415 if (i < 3)
1416 offset = 0x7f84 + i * 12;
1417 else
1418 offset = 0x7f2c + (i - 3) * 12;
1419 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1420 stl_phys(sm_state + offset + 8, dt->base);
1421 stl_phys(sm_state + offset + 4, dt->limit);
1422 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1424 stl_phys(sm_state + 0x7f14, env->cr[4]);
1426 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1427 stl_phys(sm_state + 0x7ef8, env->smbase);
1428 #endif
1429 /* init SMM cpu state */
1431 #ifdef TARGET_X86_64
1432 env->efer = 0;
1433 env->hflags &= ~HF_LMA_MASK;
1434 #endif
1435 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1436 env->eip = 0x00008000;
1437 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1438 0xffffffff, 0);
1439 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1440 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1441 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1442 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1443 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1445 cpu_x86_update_cr0(env,
1446 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1447 cpu_x86_update_cr4(env, 0);
1448 env->dr[7] = 0x00000400;
1449 CC_OP = CC_OP_EFLAGS;
1452 void helper_rsm(void)
1454 target_ulong sm_state;
1455 int i, offset;
1456 uint32_t val;
1458 sm_state = env->smbase + 0x8000;
1459 #ifdef TARGET_X86_64
1460 env->efer = ldq_phys(sm_state + 0x7ed0);
1461 if (env->efer & MSR_EFER_LMA)
1462 env->hflags |= HF_LMA_MASK;
1463 else
1464 env->hflags &= ~HF_LMA_MASK;
1466 for(i = 0; i < 6; i++) {
1467 offset = 0x7e00 + i * 16;
1468 cpu_x86_load_seg_cache(env, i,
1469 lduw_phys(sm_state + offset),
1470 ldq_phys(sm_state + offset + 8),
1471 ldl_phys(sm_state + offset + 4),
1472 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1475 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1476 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1478 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1479 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1480 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1481 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1483 env->idt.base = ldq_phys(sm_state + 0x7e88);
1484 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1486 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1487 env->tr.base = ldq_phys(sm_state + 0x7e98);
1488 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1489 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1491 EAX = ldq_phys(sm_state + 0x7ff8);
1492 ECX = ldq_phys(sm_state + 0x7ff0);
1493 EDX = ldq_phys(sm_state + 0x7fe8);
1494 EBX = ldq_phys(sm_state + 0x7fe0);
1495 ESP = ldq_phys(sm_state + 0x7fd8);
1496 EBP = ldq_phys(sm_state + 0x7fd0);
1497 ESI = ldq_phys(sm_state + 0x7fc8);
1498 EDI = ldq_phys(sm_state + 0x7fc0);
1499 for(i = 8; i < 16; i++)
1500 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1501 env->eip = ldq_phys(sm_state + 0x7f78);
1502 load_eflags(ldl_phys(sm_state + 0x7f70),
1503 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1504 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1505 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1507 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1508 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1509 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1511 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1512 if (val & 0x20000) {
1513 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1515 #else
1516 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1517 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1518 load_eflags(ldl_phys(sm_state + 0x7ff4),
1519 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1520 env->eip = ldl_phys(sm_state + 0x7ff0);
1521 EDI = ldl_phys(sm_state + 0x7fec);
1522 ESI = ldl_phys(sm_state + 0x7fe8);
1523 EBP = ldl_phys(sm_state + 0x7fe4);
1524 ESP = ldl_phys(sm_state + 0x7fe0);
1525 EBX = ldl_phys(sm_state + 0x7fdc);
1526 EDX = ldl_phys(sm_state + 0x7fd8);
1527 ECX = ldl_phys(sm_state + 0x7fd4);
1528 EAX = ldl_phys(sm_state + 0x7fd0);
1529 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1530 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1532 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1533 env->tr.base = ldl_phys(sm_state + 0x7f64);
1534 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1535 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1537 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1538 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1539 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1540 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1542 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1543 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1545 env->idt.base = ldl_phys(sm_state + 0x7f58);
1546 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1548 for(i = 0; i < 6; i++) {
1549 if (i < 3)
1550 offset = 0x7f84 + i * 12;
1551 else
1552 offset = 0x7f2c + (i - 3) * 12;
1553 cpu_x86_load_seg_cache(env, i,
1554 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1555 ldl_phys(sm_state + offset + 8),
1556 ldl_phys(sm_state + offset + 4),
1557 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1559 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1561 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1562 if (val & 0x20000) {
1563 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1565 #endif
1566 CC_OP = CC_OP_EFLAGS;
1567 env->hflags &= ~HF_SMM_MASK;
1568 cpu_smm_update(env);
1570 if (loglevel & CPU_LOG_INT) {
1571 fprintf(logfile, "SMM: after RSM\n");
1572 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1576 #endif /* !CONFIG_USER_ONLY */
1579 #ifdef BUGGY_GCC_DIV64
1580 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1581 call it from another function */
1582 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1584 *q_ptr = num / den;
1585 return num % den;
1588 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1590 *q_ptr = num / den;
1591 return num % den;
1593 #endif
1595 void helper_divl_EAX_T0(void)
1597 unsigned int den, r;
1598 uint64_t num, q;
1600 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1601 den = T0;
1602 if (den == 0) {
1603 raise_exception(EXCP00_DIVZ);
1605 #ifdef BUGGY_GCC_DIV64
1606 r = div32(&q, num, den);
1607 #else
1608 q = (num / den);
1609 r = (num % den);
1610 #endif
1611 if (q > 0xffffffff)
1612 raise_exception(EXCP00_DIVZ);
1613 EAX = (uint32_t)q;
1614 EDX = (uint32_t)r;
1617 void helper_idivl_EAX_T0(void)
1619 int den, r;
1620 int64_t num, q;
1622 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1623 den = T0;
1624 if (den == 0) {
1625 raise_exception(EXCP00_DIVZ);
1627 #ifdef BUGGY_GCC_DIV64
1628 r = idiv32(&q, num, den);
1629 #else
1630 q = (num / den);
1631 r = (num % den);
1632 #endif
1633 if (q != (int32_t)q)
1634 raise_exception(EXCP00_DIVZ);
1635 EAX = (uint32_t)q;
1636 EDX = (uint32_t)r;
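/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; if equal,
   store ECX:EBX there and set ZF, otherwise load the memory value into
   EDX:EAX and clear ZF */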
1639 void helper_cmpxchg8b(void)
1641 uint64_t d;
1642 int eflags;
1644 eflags = cc_table[CC_OP].compute_all();
1645 d = ldq(A0);
1646 if (d == (((uint64_t)EDX << 32) | EAX)) {
1647 stq(A0, ((uint64_t)ECX << 32) | EBX);
1648 eflags |= CC_Z;
1649 } else {
1650 EDX = d >> 32;
1651 EAX = d;
1652 eflags &= ~CC_Z;
1654 CC_SRC = eflags;
1657 void helper_single_step()
1659 env->dr[6] |= 0x4000;
1660 raise_exception(EXCP01_SSTP);
1663 void helper_cpuid(void)
1665 uint32_t index;
1666 index = (uint32_t)EAX;
1668 /* test if maximum index reached */
1669 if (index & 0x80000000) {
1670 if (index > env->cpuid_xlevel)
1671 index = env->cpuid_level;
1672 } else {
1673 if (index > env->cpuid_level)
1674 index = env->cpuid_level;
1677 switch(index) {
1678 case 0:
1679 EAX = env->cpuid_level;
1680 EBX = env->cpuid_vendor1;
1681 EDX = env->cpuid_vendor2;
1682 ECX = env->cpuid_vendor3;
1683 break;
1684 case 1:
1685 EAX = env->cpuid_version;
1686 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1687 ECX = env->cpuid_ext_features;
1688 EDX = env->cpuid_features;
1689 break;
1690 case 2:
1691 /* cache info: needed for Pentium Pro compatibility */
1692 EAX = 1;
1693 EBX = 0;
1694 ECX = 0;
1695 EDX = 0x2c307d;
1696 break;
1697 case 0x80000000:
1698 EAX = env->cpuid_xlevel;
1699 EBX = env->cpuid_vendor1;
1700 EDX = env->cpuid_vendor2;
1701 ECX = env->cpuid_vendor3;
1702 break;
1703 case 0x80000001:
1704 EAX = env->cpuid_features;
1705 EBX = 0;
1706 ECX = env->cpuid_ext3_features;
1707 EDX = env->cpuid_ext2_features;
1708 break;
1709 case 0x80000002:
1710 case 0x80000003:
1711 case 0x80000004:
1712 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1713 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1714 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1715 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1716 break;
1717 case 0x80000005:
1718 /* cache info (L1 cache) */
1719 EAX = 0x01ff01ff;
1720 EBX = 0x01ff01ff;
1721 ECX = 0x40020140;
1722 EDX = 0x40020140;
1723 break;
1724 case 0x80000006:
1725 /* cache info (L2 cache) */
1726 EAX = 0;
1727 EBX = 0x42004200;
1728 ECX = 0x02008140;
1729 EDX = 0;
1730 break;
1731 case 0x80000008:
1732 /* virtual & phys address size in low 2 bytes. */
1733 EAX = 0x00003028;
1734 EBX = 0;
1735 ECX = 0;
1736 EDX = 0;
1737 break;
1738 default:
1739 /* reserved values: zero */
1740 EAX = 0;
1741 EBX = 0;
1742 ECX = 0;
1743 EDX = 0;
1744 break;
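/* ENTER with a non-zero nesting level: copy level-1 frame pointers from
   the old frame onto the new stack, then push T1 (presumably the new frame
   pointer value set up by the translated code) */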
1748 void helper_enter_level(int level, int data32)
1750 target_ulong ssp;
1751 uint32_t esp_mask, esp, ebp;
1753 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1754 ssp = env->segs[R_SS].base;
1755 ebp = EBP;
1756 esp = ESP;
1757 if (data32) {
1758 /* 32 bit */
1759 esp -= 4;
1760 while (--level) {
1761 esp -= 4;
1762 ebp -= 4;
1763 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1765 esp -= 4;
1766 stl(ssp + (esp & esp_mask), T1);
1767 } else {
1768 /* 16 bit */
1769 esp -= 2;
1770 while (--level) {
1771 esp -= 2;
1772 ebp -= 2;
1773 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1775 esp -= 2;
1776 stw(ssp + (esp & esp_mask), T1);
1780 #ifdef TARGET_X86_64
1781 void helper_enter64_level(int level, int data64)
1783 target_ulong esp, ebp;
1784 ebp = EBP;
1785 esp = ESP;
1787 if (data64) {
1788 /* 64 bit */
1789 esp -= 8;
1790 while (--level) {
1791 esp -= 8;
1792 ebp -= 8;
1793 stq(esp, ldq(ebp));
1795 esp -= 8;
1796 stq(esp, T1);
1797 } else {
1798 /* 16 bit */
1799 esp -= 2;
1800 while (--level) {
1801 esp -= 2;
1802 ebp -= 2;
1803 stw(esp, lduw(ebp));
1805 esp -= 2;
1806 stw(esp, T1);
1809 #endif
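/* LLDT and LTR load a system descriptor from the GDT; in long mode LDT
   and TSS descriptors are 16 bytes wide (the third dword holds base bits
   32-63), hence the larger entry_limit used below */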
1811 void helper_lldt_T0(void)
1813 int selector;
1814 SegmentCache *dt;
1815 uint32_t e1, e2;
1816 int index, entry_limit;
1817 target_ulong ptr;
1819 selector = T0 & 0xffff;
1820 if ((selector & 0xfffc) == 0) {
1821 /* XXX: NULL selector case: invalid LDT */
1822 env->ldt.base = 0;
1823 env->ldt.limit = 0;
1824 } else {
1825 if (selector & 0x4)
1826 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1827 dt = &env->gdt;
1828 index = selector & ~7;
1829 #ifdef TARGET_X86_64
1830 if (env->hflags & HF_LMA_MASK)
1831 entry_limit = 15;
1832 else
1833 #endif
1834 entry_limit = 7;
1835 if ((index + entry_limit) > dt->limit)
1836 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1837 ptr = dt->base + index;
1838 e1 = ldl_kernel(ptr);
1839 e2 = ldl_kernel(ptr + 4);
1840 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1841 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1842 if (!(e2 & DESC_P_MASK))
1843 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1844 #ifdef TARGET_X86_64
1845 if (env->hflags & HF_LMA_MASK) {
1846 uint32_t e3;
1847 e3 = ldl_kernel(ptr + 8);
1848 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1849 env->ldt.base |= (target_ulong)e3 << 32;
1850 } else
1851 #endif
1853 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1856 env->ldt.selector = selector;
1859 void helper_ltr_T0(void)
1861 int selector;
1862 SegmentCache *dt;
1863 uint32_t e1, e2;
1864 int index, type, entry_limit;
1865 target_ulong ptr;
1867 selector = T0 & 0xffff;
1868 if ((selector & 0xfffc) == 0) {
1869 /* NULL selector case: invalid TR */
1870 env->tr.base = 0;
1871 env->tr.limit = 0;
1872 env->tr.flags = 0;
1873 } else {
1874 if (selector & 0x4)
1875 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1876 dt = &env->gdt;
1877 index = selector & ~7;
1878 #ifdef TARGET_X86_64
1879 if (env->hflags & HF_LMA_MASK)
1880 entry_limit = 15;
1881 else
1882 #endif
1883 entry_limit = 7;
1884 if ((index + entry_limit) > dt->limit)
1885 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1886 ptr = dt->base + index;
1887 e1 = ldl_kernel(ptr);
1888 e2 = ldl_kernel(ptr + 4);
1889 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1890 if ((e2 & DESC_S_MASK) ||
1891 (type != 1 && type != 9))
1892 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1893 if (!(e2 & DESC_P_MASK))
1894 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1895 #ifdef TARGET_X86_64
1896 if (env->hflags & HF_LMA_MASK) {
1897 uint32_t e3, e4;
1898 e3 = ldl_kernel(ptr + 8);
1899 e4 = ldl_kernel(ptr + 12);
1900 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
1901 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1902 load_seg_cache_raw_dt(&env->tr, e1, e2);
1903 env->tr.base |= (target_ulong)e3 << 32;
1904 } else
1905 #endif
1907 load_seg_cache_raw_dt(&env->tr, e1, e2);
1909 e2 |= DESC_TSS_BUSY_MASK;
1910 stl_kernel(ptr + 4, e2);
1912 env->tr.selector = selector;
1915 /* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
1916 void load_seg(int seg_reg, int selector)
1918 uint32_t e1, e2;
1919 int cpl, dpl, rpl;
1920 SegmentCache *dt;
1921 int index;
1922 target_ulong ptr;
1924 selector &= 0xffff;
1925 cpl = env->hflags & HF_CPL_MASK;
1926 if ((selector & 0xfffc) == 0) {
1927 /* null selector case */
1928 if (seg_reg == R_SS
1929 #ifdef TARGET_X86_64
1930 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1931 #endif
1933 raise_exception_err(EXCP0D_GPF, 0);
1934 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1935 } else {
1937 if (selector & 0x4)
1938 dt = &env->ldt;
1939 else
1940 dt = &env->gdt;
1941 index = selector & ~7;
1942 if ((index + 7) > dt->limit)
1943 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1944 ptr = dt->base + index;
1945 e1 = ldl_kernel(ptr);
1946 e2 = ldl_kernel(ptr + 4);
1948 if (!(e2 & DESC_S_MASK))
1949 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1950 rpl = selector & 3;
1951 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1952 if (seg_reg == R_SS) {
1953 /* must be writable segment */
1954 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
1955 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1956 if (rpl != cpl || dpl != cpl)
1957 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1958 } else {
1959 /* must be readable segment */
1960 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
1961 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1963 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1964 /* if data or non-conforming code, check the rights */
1965 if (dpl < cpl || dpl < rpl)
1966 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1970 if (!(e2 & DESC_P_MASK)) {
1971 if (seg_reg == R_SS)
1972 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1973 else
1974 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1977 /* set the access bit if not already set */
1978 if (!(e2 & DESC_A_MASK)) {
1979 e2 |= DESC_A_MASK;
1980 stl_kernel(ptr + 4, e2);
1983 cpu_x86_load_seg_cache(env, seg_reg, selector,
1984 get_seg_base(e1, e2),
1985 get_seg_limit(e1, e2),
1986 e2);
1987 #if 0
1988 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1989 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1990 #endif
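/* a far JMP in protected mode can target a code segment directly or go
   through a task gate, TSS descriptor or call gate; in the call gate case
   only the gate's target CS:EIP is used, the CPL does not change and no
   stack switch occurs */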
1994 /* protected mode jump */
1995 void helper_ljmp_protected_T0_T1(int next_eip_addend)
1997 int new_cs, gate_cs, type;
1998 uint32_t e1, e2, cpl, dpl, rpl, limit;
1999 target_ulong new_eip, next_eip;
2001 new_cs = T0;
2002 new_eip = T1;
2003 if ((new_cs & 0xfffc) == 0)
2004 raise_exception_err(EXCP0D_GPF, 0);
2005 if (load_segment(&e1, &e2, new_cs) != 0)
2006 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2007 cpl = env->hflags & HF_CPL_MASK;
2008 if (e2 & DESC_S_MASK) {
2009 if (!(e2 & DESC_CS_MASK))
2010 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2011 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2012 if (e2 & DESC_C_MASK) {
2013 /* conforming code segment */
2014 if (dpl > cpl)
2015 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2016 } else {
2017 /* non conforming code segment */
2018 rpl = new_cs & 3;
2019 if (rpl > cpl)
2020 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2021 if (dpl != cpl)
2022 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2024 if (!(e2 & DESC_P_MASK))
2025 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2026 limit = get_seg_limit(e1, e2);
2027 if (new_eip > limit &&
2028 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2029 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2030 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2031 get_seg_base(e1, e2), limit, e2);
2032 EIP = new_eip;
2033 } else {
2034 /* jump to call or task gate */
2035 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2036 rpl = new_cs & 3;
2037 cpl = env->hflags & HF_CPL_MASK;
2038 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2039 switch(type) {
2040 case 1: /* 286 TSS */
2041 case 9: /* 386 TSS */
2042 case 5: /* task gate */
2043 if (dpl < cpl || dpl < rpl)
2044 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2045 next_eip = env->eip + next_eip_addend;
2046 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2047 CC_OP = CC_OP_EFLAGS;
2048 break;
2049 case 4: /* 286 call gate */
2050 case 12: /* 386 call gate */
2051 if ((dpl < cpl) || (dpl < rpl))
2052 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2053 if (!(e2 & DESC_P_MASK))
2054 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2055 gate_cs = e1 >> 16;
2056 new_eip = (e1 & 0xffff);
2057 if (type == 12)
2058 new_eip |= (e2 & 0xffff0000);
2059 if (load_segment(&e1, &e2, gate_cs) != 0)
2060 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2061 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2062 /* must be code segment */
2063 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2064 (DESC_S_MASK | DESC_CS_MASK)))
2065 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2066 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2067 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2068 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2069 if (!(e2 & DESC_P_MASK))
2070 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2071 limit = get_seg_limit(e1, e2);
2072 if (new_eip > limit)
2073 raise_exception_err(EXCP0D_GPF, 0);
2074 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2075 get_seg_base(e1, e2), limit, e2);
2076 EIP = new_eip;
2077 break;
2078 default:
2079 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2080 break;
2085 /* real mode call */
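/* Real-mode far CALL: push the return CS:IP (32- or 16-bit pushes
   depending on 'shift') and load CS directly; the new base is simply
   selector << 4, e.g. new_cs = 0x1234 gives base 0x12340. */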
2086 void helper_lcall_real_T0_T1(int shift, int next_eip)
2088 int new_cs, new_eip;
2089 uint32_t esp, esp_mask;
2090 target_ulong ssp;
2092 new_cs = T0;
2093 new_eip = T1;
2094 esp = ESP;
2095 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2096 ssp = env->segs[R_SS].base;
2097 if (shift) {
2098 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2099 PUSHL(ssp, esp, esp_mask, next_eip);
2100 } else {
2101 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2102 PUSHW(ssp, esp, esp_mask, next_eip);
2105 SET_ESP(esp, esp_mask);
2106 env->eip = new_eip;
2107 env->segs[R_CS].selector = new_cs;
2108 env->segs[R_CS].base = (new_cs << 4);
2111 /* protected mode call */
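/* Far CALL with a protected-mode CS.  A direct call to a code segment
   pushes the return CS:EIP on the current stack (64-bit pushes when
   shift == 2).  A call through a call gate may switch to an
   inner-privilege stack fetched from the TSS: the old SS:ESP and up to
   31 parameters (the gate's count field) are copied onto the new stack
   before CS:EIP is pushed, and CPL drops to the target code segment's
   DPL. */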
2112 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2114 int new_cs, new_stack, i;
2115 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2116 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2117 uint32_t val, limit, old_sp_mask;
2118 target_ulong ssp, old_ssp, next_eip, new_eip;
2120 new_cs = T0;
2121 new_eip = T1;
2122 next_eip = env->eip + next_eip_addend;
2123 #ifdef DEBUG_PCALL
2124 if (loglevel & CPU_LOG_PCALL) {
2125 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2126 new_cs, (uint32_t)new_eip, shift);
2127 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2129 #endif
2130 if ((new_cs & 0xfffc) == 0)
2131 raise_exception_err(EXCP0D_GPF, 0);
2132 if (load_segment(&e1, &e2, new_cs) != 0)
2133 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2134 cpl = env->hflags & HF_CPL_MASK;
2135 #ifdef DEBUG_PCALL
2136 if (loglevel & CPU_LOG_PCALL) {
2137 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2139 #endif
2140 if (e2 & DESC_S_MASK) {
2141 if (!(e2 & DESC_CS_MASK))
2142 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2143 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2144 if (e2 & DESC_C_MASK) {
2145 /* conforming code segment */
2146 if (dpl > cpl)
2147 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2148 } else {
2149 /* non conforming code segment */
2150 rpl = new_cs & 3;
2151 if (rpl > cpl)
2152 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2153 if (dpl != cpl)
2154 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2156 if (!(e2 & DESC_P_MASK))
2157 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2159 #ifdef TARGET_X86_64
2160 /* XXX: check 16/32 bit cases in long mode */
2161 if (shift == 2) {
2162 target_ulong rsp;
2163 /* 64 bit case */
2164 rsp = ESP;
2165 PUSHQ(rsp, env->segs[R_CS].selector);
2166 PUSHQ(rsp, next_eip);
2167 /* from this point, not restartable */
2168 ESP = rsp;
2169 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2170 get_seg_base(e1, e2),
2171 get_seg_limit(e1, e2), e2);
2172 EIP = new_eip;
2173 } else
2174 #endif
2176 sp = ESP;
2177 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2178 ssp = env->segs[R_SS].base;
2179 if (shift) {
2180 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2181 PUSHL(ssp, sp, sp_mask, next_eip);
2182 } else {
2183 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2184 PUSHW(ssp, sp, sp_mask, next_eip);
2187 limit = get_seg_limit(e1, e2);
2188 if (new_eip > limit)
2189 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2190 /* from this point, not restartable */
2191 SET_ESP(sp, sp_mask);
2192 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2193 get_seg_base(e1, e2), limit, e2);
2194 EIP = new_eip;
2196 } else {
2197 /* check gate type */
2198 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2199 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2200 rpl = new_cs & 3;
2201 switch(type) {
2202 case 1: /* available 286 TSS */
2203 case 9: /* available 386 TSS */
2204 case 5: /* task gate */
2205 if (dpl < cpl || dpl < rpl)
2206 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2207 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2208 CC_OP = CC_OP_EFLAGS;
2209 return;
2210 case 4: /* 286 call gate */
2211 case 12: /* 386 call gate */
2212 break;
2213 default:
2214 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2215 break;
2217 shift = type >> 3;
2219 if (dpl < cpl || dpl < rpl)
2220 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2221 /* check valid bit */
2222 if (!(e2 & DESC_P_MASK))
2223 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2224 selector = e1 >> 16;
2225 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2226 param_count = e2 & 0x1f;
2227 if ((selector & 0xfffc) == 0)
2228 raise_exception_err(EXCP0D_GPF, 0);
2230 if (load_segment(&e1, &e2, selector) != 0)
2231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2232 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2234 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2235 if (dpl > cpl)
2236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2237 if (!(e2 & DESC_P_MASK))
2238 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2240 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2241 /* to inner privilege */
2242 get_ss_esp_from_tss(&ss, &sp, dpl);
2243 #ifdef DEBUG_PCALL
2244 if (loglevel & CPU_LOG_PCALL)
2245 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2246 ss, sp, param_count, ESP);
2247 #endif
2248 if ((ss & 0xfffc) == 0)
2249 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2250 if ((ss & 3) != dpl)
2251 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2252 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2253 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2254 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2255 if (ss_dpl != dpl)
2256 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2257 if (!(ss_e2 & DESC_S_MASK) ||
2258 (ss_e2 & DESC_CS_MASK) ||
2259 !(ss_e2 & DESC_W_MASK))
2260 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2261 if (!(ss_e2 & DESC_P_MASK))
2262 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2264 // push_size = ((param_count * 2) + 8) << shift;
2266 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2267 old_ssp = env->segs[R_SS].base;
2269 sp_mask = get_sp_mask(ss_e2);
2270 ssp = get_seg_base(ss_e1, ss_e2);
2271 if (shift) {
2272 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2273 PUSHL(ssp, sp, sp_mask, ESP);
2274 for(i = param_count - 1; i >= 0; i--) {
2275 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2276 PUSHL(ssp, sp, sp_mask, val);
2278 } else {
2279 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2280 PUSHW(ssp, sp, sp_mask, ESP);
2281 for(i = param_count - 1; i >= 0; i--) {
2282 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2283 PUSHW(ssp, sp, sp_mask, val);
2286 new_stack = 1;
2287 } else {
2288 /* to same privilege */
2289 sp = ESP;
2290 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2291 ssp = env->segs[R_SS].base;
2292 // push_size = (4 << shift);
2293 new_stack = 0;
2296 if (shift) {
2297 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2298 PUSHL(ssp, sp, sp_mask, next_eip);
2299 } else {
2300 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2301 PUSHW(ssp, sp, sp_mask, next_eip);
2304 /* from this point, not restartable */
2306 if (new_stack) {
2307 ss = (ss & ~3) | dpl;
2308 cpu_x86_load_seg_cache(env, R_SS, ss,
2309 ssp,
2310 get_seg_limit(ss_e1, ss_e2),
2311 ss_e2);
2314 selector = (selector & ~3) | dpl;
2315 cpu_x86_load_seg_cache(env, R_CS, selector,
2316 get_seg_base(e1, e2),
2317 get_seg_limit(e1, e2),
2318 e2);
2319 cpu_x86_set_cpl(env, dpl);
2320 SET_ESP(sp, sp_mask);
2321 EIP = offset;
2323 #ifdef USE_KQEMU
2324 if (kqemu_is_ok(env)) {
2325 env->exception_index = -1;
2326 cpu_loop_exit();
2328 #endif
2331 /* real and vm86 mode iret */
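/* IRET in real or vm86 mode: pop IP, CS and FLAGS (16- or 32-bit
   depending on 'shift').  In vm86 mode the popped flags cannot change
   IOPL. */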
2332 void helper_iret_real(int shift)
2334 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2335 target_ulong ssp;
2336 int eflags_mask;
2338 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2339 sp = ESP;
2340 ssp = env->segs[R_SS].base;
2341 if (shift == 1) {
2342 /* 32 bits */
2343 POPL(ssp, sp, sp_mask, new_eip);
2344 POPL(ssp, sp, sp_mask, new_cs);
2345 new_cs &= 0xffff;
2346 POPL(ssp, sp, sp_mask, new_eflags);
2347 } else {
2348 /* 16 bits */
2349 POPW(ssp, sp, sp_mask, new_eip);
2350 POPW(ssp, sp, sp_mask, new_cs);
2351 POPW(ssp, sp, sp_mask, new_eflags);
2353 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2354 load_seg_vm(R_CS, new_cs);
2355 env->eip = new_eip;
2356 if (env->eflags & VM_MASK)
2357 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2358 else
2359 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2360 if (shift == 0)
2361 eflags_mask &= 0xffff;
2362 load_eflags(new_eflags, eflags_mask);
2365 static inline void validate_seg(int seg_reg, int cpl)
2367 int dpl;
2368 uint32_t e2;
2370 /* XXX: on x86_64, we do not want to nullify FS and GS because
2371 they may still contain a valid base. I would be interested to
2372 know how a real x86_64 CPU behaves */
2373 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2374 (env->segs[seg_reg].selector & 0xfffc) == 0)
2375 return;
2377 e2 = env->segs[seg_reg].flags;
2378 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2379 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2380 /* data or non conforming code segment */
2381 if (dpl < cpl) {
2382 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2387 /* protected mode iret */
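/* Common code for LRET and IRET.  Pops CS:EIP (plus EFLAGS for IRET),
   validates the new CS, and when returning to an outer privilege level
   also pops SS:ESP, reloads the stack and nullifies data segment
   registers whose DPL is below the new CPL.  A 32-bit IRET whose popped
   EFLAGS has VM set returns to vm86 mode instead. */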
2388 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2390 uint32_t new_cs, new_eflags, new_ss;
2391 uint32_t new_es, new_ds, new_fs, new_gs;
2392 uint32_t e1, e2, ss_e1, ss_e2;
2393 int cpl, dpl, rpl, eflags_mask, iopl;
2394 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2396 #ifdef TARGET_X86_64
2397 if (shift == 2)
2398 sp_mask = -1;
2399 else
2400 #endif
2401 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2402 sp = ESP;
2403 ssp = env->segs[R_SS].base;
2404 new_eflags = 0; /* avoid warning */
2405 #ifdef TARGET_X86_64
2406 if (shift == 2) {
2407 POPQ(sp, new_eip);
2408 POPQ(sp, new_cs);
2409 new_cs &= 0xffff;
2410 if (is_iret) {
2411 POPQ(sp, new_eflags);
2413 } else
2414 #endif
2415 if (shift == 1) {
2416 /* 32 bits */
2417 POPL(ssp, sp, sp_mask, new_eip);
2418 POPL(ssp, sp, sp_mask, new_cs);
2419 new_cs &= 0xffff;
2420 if (is_iret) {
2421 POPL(ssp, sp, sp_mask, new_eflags);
2422 if (new_eflags & VM_MASK)
2423 goto return_to_vm86;
2425 } else {
2426 /* 16 bits */
2427 POPW(ssp, sp, sp_mask, new_eip);
2428 POPW(ssp, sp, sp_mask, new_cs);
2429 if (is_iret)
2430 POPW(ssp, sp, sp_mask, new_eflags);
2432 #ifdef DEBUG_PCALL
2433 if (loglevel & CPU_LOG_PCALL) {
2434 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2435 new_cs, new_eip, shift, addend);
2436 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2438 #endif
2439 if ((new_cs & 0xfffc) == 0)
2440 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2441 if (load_segment(&e1, &e2, new_cs) != 0)
2442 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2443 if (!(e2 & DESC_S_MASK) ||
2444 !(e2 & DESC_CS_MASK))
2445 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2446 cpl = env->hflags & HF_CPL_MASK;
2447 rpl = new_cs & 3;
2448 if (rpl < cpl)
2449 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2450 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2451 if (e2 & DESC_C_MASK) {
2452 if (dpl > rpl)
2453 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2454 } else {
2455 if (dpl != rpl)
2456 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2458 if (!(e2 & DESC_P_MASK))
2459 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2461 sp += addend;
2462 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2463 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2464 /* return to same privilege level */
2465 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2466 get_seg_base(e1, e2),
2467 get_seg_limit(e1, e2),
2468 e2);
2469 } else {
2470 /* return to different privilege level */
2471 #ifdef TARGET_X86_64
2472 if (shift == 2) {
2473 POPQ(sp, new_esp);
2474 POPQ(sp, new_ss);
2475 new_ss &= 0xffff;
2476 } else
2477 #endif
2478 if (shift == 1) {
2479 /* 32 bits */
2480 POPL(ssp, sp, sp_mask, new_esp);
2481 POPL(ssp, sp, sp_mask, new_ss);
2482 new_ss &= 0xffff;
2483 } else {
2484 /* 16 bits */
2485 POPW(ssp, sp, sp_mask, new_esp);
2486 POPW(ssp, sp, sp_mask, new_ss);
2488 #ifdef DEBUG_PCALL
2489 if (loglevel & CPU_LOG_PCALL) {
2490 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2491 new_ss, new_esp);
2493 #endif
2494 if ((new_ss & 0xfffc) == 0) {
2495 #ifdef TARGET_X86_64
2496 /* NULL ss is allowed in long mode if cpl != 3 */
2497 /* XXX: test CS64 ? */
2498 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2499 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2500 0, 0xffffffff,
2501 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2502 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2503 DESC_W_MASK | DESC_A_MASK);
2504 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2505 } else
2506 #endif
2508 raise_exception_err(EXCP0D_GPF, 0);
2510 } else {
2511 if ((new_ss & 3) != rpl)
2512 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2513 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2514 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2515 if (!(ss_e2 & DESC_S_MASK) ||
2516 (ss_e2 & DESC_CS_MASK) ||
2517 !(ss_e2 & DESC_W_MASK))
2518 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2519 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2520 if (dpl != rpl)
2521 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2522 if (!(ss_e2 & DESC_P_MASK))
2523 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2524 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2525 get_seg_base(ss_e1, ss_e2),
2526 get_seg_limit(ss_e1, ss_e2),
2527 ss_e2);
2530 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2531 get_seg_base(e1, e2),
2532 get_seg_limit(e1, e2),
2533 e2);
2534 cpu_x86_set_cpl(env, rpl);
2535 sp = new_esp;
2536 #ifdef TARGET_X86_64
2537 if (env->hflags & HF_CS64_MASK)
2538 sp_mask = -1;
2539 else
2540 #endif
2541 sp_mask = get_sp_mask(ss_e2);
2543 /* validate data segments */
2544 validate_seg(R_ES, rpl);
2545 validate_seg(R_DS, rpl);
2546 validate_seg(R_FS, rpl);
2547 validate_seg(R_GS, rpl);
2549 sp += addend;
2551 SET_ESP(sp, sp_mask);
2552 env->eip = new_eip;
2553 if (is_iret) {
2554 /* NOTE: 'cpl' is the _old_ CPL */
2555 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2556 if (cpl == 0)
2557 eflags_mask |= IOPL_MASK;
2558 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2559 if (cpl <= iopl)
2560 eflags_mask |= IF_MASK;
2561 if (shift == 0)
2562 eflags_mask &= 0xffff;
2563 load_eflags(new_eflags, eflags_mask);
2565 return;
2567 return_to_vm86:
2568 POPL(ssp, sp, sp_mask, new_esp);
2569 POPL(ssp, sp, sp_mask, new_ss);
2570 POPL(ssp, sp, sp_mask, new_es);
2571 POPL(ssp, sp, sp_mask, new_ds);
2572 POPL(ssp, sp, sp_mask, new_fs);
2573 POPL(ssp, sp, sp_mask, new_gs);
2575 /* modify processor state */
2576 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2577 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2578 load_seg_vm(R_CS, new_cs & 0xffff);
2579 cpu_x86_set_cpl(env, 3);
2580 load_seg_vm(R_SS, new_ss & 0xffff);
2581 load_seg_vm(R_ES, new_es & 0xffff);
2582 load_seg_vm(R_DS, new_ds & 0xffff);
2583 load_seg_vm(R_FS, new_fs & 0xffff);
2584 load_seg_vm(R_GS, new_gs & 0xffff);
2586 env->eip = new_eip & 0xffff;
2587 ESP = new_esp;
2590 void helper_iret_protected(int shift, int next_eip)
2592 int tss_selector, type;
2593 uint32_t e1, e2;
2595 /* specific case for TSS */
2596 if (env->eflags & NT_MASK) {
2597 #ifdef TARGET_X86_64
2598 if (env->hflags & HF_LMA_MASK)
2599 raise_exception_err(EXCP0D_GPF, 0);
2600 #endif
2601 tss_selector = lduw_kernel(env->tr.base + 0);
2602 if (tss_selector & 4)
2603 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2604 if (load_segment(&e1, &e2, tss_selector) != 0)
2605 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2606 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2607 /* NOTE: we check both segment and busy TSS */
2608 if (type != 3)
2609 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2610 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2611 } else {
2612 helper_ret_protected(shift, 1, 0);
2614 #ifdef USE_KQEMU
2615 if (kqemu_is_ok(env)) {
2616 CC_OP = CC_OP_EFLAGS;
2617 env->exception_index = -1;
2618 cpu_loop_exit();
2620 #endif
2623 void helper_lret_protected(int shift, int addend)
2625 helper_ret_protected(shift, 0, addend);
2626 #ifdef USE_KQEMU
2627 if (kqemu_is_ok(env)) {
2628 env->exception_index = -1;
2629 cpu_loop_exit();
2631 #endif
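/* SYSENTER/SYSEXIT fast system calls: CS and SS are derived from the
   SYSENTER_CS MSR (CS, CS + 8 on entry; CS + 16, CS + 24 for the ring-3
   return), ESP/EIP come from the SYSENTER_ESP/EIP MSRs on entry and
   from ECX/EDX on exit. */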
2634 void helper_sysenter(void)
2636 if (env->sysenter_cs == 0) {
2637 raise_exception_err(EXCP0D_GPF, 0);
2639 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2640 cpu_x86_set_cpl(env, 0);
2641 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2642 0, 0xffffffff,
2643 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2644 DESC_S_MASK |
2645 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2646 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2647 0, 0xffffffff,
2648 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2649 DESC_S_MASK |
2650 DESC_W_MASK | DESC_A_MASK);
2651 ESP = env->sysenter_esp;
2652 EIP = env->sysenter_eip;
2655 void helper_sysexit(void)
2657 int cpl;
2659 cpl = env->hflags & HF_CPL_MASK;
2660 if (env->sysenter_cs == 0 || cpl != 0) {
2661 raise_exception_err(EXCP0D_GPF, 0);
2663 cpu_x86_set_cpl(env, 3);
2664 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2665 0, 0xffffffff,
2666 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2667 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2668 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2669 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2670 0, 0xffffffff,
2671 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2672 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2673 DESC_W_MASK | DESC_A_MASK);
2674 ESP = ECX;
2675 EIP = EDX;
2676 #ifdef USE_KQEMU
2677 if (kqemu_is_ok(env)) {
2678 env->exception_index = -1;
2679 cpu_loop_exit();
2681 #endif
2684 void helper_movl_crN_T0(int reg)
2686 #if !defined(CONFIG_USER_ONLY)
2687 switch(reg) {
2688 case 0:
2689 cpu_x86_update_cr0(env, T0);
2690 break;
2691 case 3:
2692 cpu_x86_update_cr3(env, T0);
2693 break;
2694 case 4:
2695 cpu_x86_update_cr4(env, T0);
2696 break;
2697 case 8:
2698 cpu_set_apic_tpr(env, T0);
2699 break;
2700 default:
2701 env->cr[reg] = T0;
2702 break;
2704 #endif
2707 /* XXX: do more */
2708 void helper_movl_drN_T0(int reg)
2710 env->dr[reg] = T0;
2713 void helper_invlpg(target_ulong addr)
2715 cpu_x86_flush_tlb(env, addr);
2718 void helper_rdtsc(void)
2720 uint64_t val;
2722 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2723 raise_exception(EXCP0D_GPF);
2725 val = cpu_get_tsc(env);
2726 EAX = (uint32_t)(val);
2727 EDX = (uint32_t)(val >> 32);
2730 #if defined(CONFIG_USER_ONLY)
2731 void helper_wrmsr(void)
2735 void helper_rdmsr(void)
2738 #else
2739 void helper_wrmsr(void)
2741 uint64_t val;
2743 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2745 switch((uint32_t)ECX) {
2746 case MSR_IA32_SYSENTER_CS:
2747 env->sysenter_cs = val & 0xffff;
2748 break;
2749 case MSR_IA32_SYSENTER_ESP:
2750 env->sysenter_esp = val;
2751 break;
2752 case MSR_IA32_SYSENTER_EIP:
2753 env->sysenter_eip = val;
2754 break;
2755 case MSR_IA32_APICBASE:
2756 cpu_set_apic_base(env, val);
2757 break;
2758 case MSR_EFER:
2760 uint64_t update_mask;
2761 update_mask = 0;
2762 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2763 update_mask |= MSR_EFER_SCE;
2764 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2765 update_mask |= MSR_EFER_LME;
2766 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2767 update_mask |= MSR_EFER_FFXSR;
2768 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2769 update_mask |= MSR_EFER_NXE;
2770 env->efer = (env->efer & ~update_mask) |
2771 (val & update_mask);
2773 break;
2774 case MSR_STAR:
2775 env->star = val;
2776 break;
2777 case MSR_PAT:
2778 env->pat = val;
2779 break;
2780 case MSR_VM_HSAVE_PA:
2781 env->vm_hsave = val;
2782 break;
2783 #ifdef TARGET_X86_64
2784 case MSR_LSTAR:
2785 env->lstar = val;
2786 break;
2787 case MSR_CSTAR:
2788 env->cstar = val;
2789 break;
2790 case MSR_FMASK:
2791 env->fmask = val;
2792 break;
2793 case MSR_FSBASE:
2794 env->segs[R_FS].base = val;
2795 break;
2796 case MSR_GSBASE:
2797 env->segs[R_GS].base = val;
2798 break;
2799 case MSR_KERNELGSBASE:
2800 env->kernelgsbase = val;
2801 break;
2802 #endif
2803 default:
2804 /* XXX: exception ? */
2805 break;
2809 void helper_rdmsr(void)
2811 uint64_t val;
2812 switch((uint32_t)ECX) {
2813 case MSR_IA32_SYSENTER_CS:
2814 val = env->sysenter_cs;
2815 break;
2816 case MSR_IA32_SYSENTER_ESP:
2817 val = env->sysenter_esp;
2818 break;
2819 case MSR_IA32_SYSENTER_EIP:
2820 val = env->sysenter_eip;
2821 break;
2822 case MSR_IA32_APICBASE:
2823 val = cpu_get_apic_base(env);
2824 break;
2825 case MSR_EFER:
2826 val = env->efer;
2827 break;
2828 case MSR_STAR:
2829 val = env->star;
2830 break;
2831 case MSR_PAT:
2832 val = env->pat;
2833 break;
2834 case MSR_VM_HSAVE_PA:
2835 val = env->vm_hsave;
2836 break;
2837 #ifdef TARGET_X86_64
2838 case MSR_LSTAR:
2839 val = env->lstar;
2840 break;
2841 case MSR_CSTAR:
2842 val = env->cstar;
2843 break;
2844 case MSR_FMASK:
2845 val = env->fmask;
2846 break;
2847 case MSR_FSBASE:
2848 val = env->segs[R_FS].base;
2849 break;
2850 case MSR_GSBASE:
2851 val = env->segs[R_GS].base;
2852 break;
2853 case MSR_KERNELGSBASE:
2854 val = env->kernelgsbase;
2855 break;
2856 #endif
2857 default:
2858 /* XXX: exception ? */
2859 val = 0;
2860 break;
2862 EAX = (uint32_t)(val);
2863 EDX = (uint32_t)(val >> 32);
2865 #endif
2867 void helper_lsl(void)
2869 unsigned int selector, limit;
2870 uint32_t e1, e2, eflags;
2871 int rpl, dpl, cpl, type;
2873 eflags = cc_table[CC_OP].compute_all();
2874 selector = T0 & 0xffff;
2875 if (load_segment(&e1, &e2, selector) != 0)
2876 goto fail;
2877 rpl = selector & 3;
2878 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2879 cpl = env->hflags & HF_CPL_MASK;
2880 if (e2 & DESC_S_MASK) {
2881 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2882 /* conforming */
2883 } else {
2884 if (dpl < cpl || dpl < rpl)
2885 goto fail;
2887 } else {
2888 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2889 switch(type) {
2890 case 1:
2891 case 2:
2892 case 3:
2893 case 9:
2894 case 11:
2895 break;
2896 default:
2897 goto fail;
2899 if (dpl < cpl || dpl < rpl) {
2900 fail:
2901 CC_SRC = eflags & ~CC_Z;
2902 return;
2905 limit = get_seg_limit(e1, e2);
2906 T1 = limit;
2907 CC_SRC = eflags | CC_Z;
2910 void helper_lar(void)
2912 unsigned int selector;
2913 uint32_t e1, e2, eflags;
2914 int rpl, dpl, cpl, type;
2916 eflags = cc_table[CC_OP].compute_all();
2917 selector = T0 & 0xffff;
2918 if ((selector & 0xfffc) == 0)
2919 goto fail;
2920 if (load_segment(&e1, &e2, selector) != 0)
2921 goto fail;
2922 rpl = selector & 3;
2923 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2924 cpl = env->hflags & HF_CPL_MASK;
2925 if (e2 & DESC_S_MASK) {
2926 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2927 /* conforming */
2928 } else {
2929 if (dpl < cpl || dpl < rpl)
2930 goto fail;
2932 } else {
2933 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2934 switch(type) {
2935 case 1:
2936 case 2:
2937 case 3:
2938 case 4:
2939 case 5:
2940 case 9:
2941 case 11:
2942 case 12:
2943 break;
2944 default:
2945 goto fail;
2947 if (dpl < cpl || dpl < rpl) {
2948 fail:
2949 CC_SRC = eflags & ~CC_Z;
2950 return;
2953 T1 = e2 & 0x00f0ff00;
2954 CC_SRC = eflags | CC_Z;
2957 void helper_verr(void)
2959 unsigned int selector;
2960 uint32_t e1, e2, eflags;
2961 int rpl, dpl, cpl;
2963 eflags = cc_table[CC_OP].compute_all();
2964 selector = T0 & 0xffff;
2965 if ((selector & 0xfffc) == 0)
2966 goto fail;
2967 if (load_segment(&e1, &e2, selector) != 0)
2968 goto fail;
2969 if (!(e2 & DESC_S_MASK))
2970 goto fail;
2971 rpl = selector & 3;
2972 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2973 cpl = env->hflags & HF_CPL_MASK;
2974 if (e2 & DESC_CS_MASK) {
2975 if (!(e2 & DESC_R_MASK))
2976 goto fail;
2977 if (!(e2 & DESC_C_MASK)) {
2978 if (dpl < cpl || dpl < rpl)
2979 goto fail;
2981 } else {
2982 if (dpl < cpl || dpl < rpl) {
2983 fail:
2984 CC_SRC = eflags & ~CC_Z;
2985 return;
2988 CC_SRC = eflags | CC_Z;
2991 void helper_verw(void)
2993 unsigned int selector;
2994 uint32_t e1, e2, eflags;
2995 int rpl, dpl, cpl;
2997 eflags = cc_table[CC_OP].compute_all();
2998 selector = T0 & 0xffff;
2999 if ((selector & 0xfffc) == 0)
3000 goto fail;
3001 if (load_segment(&e1, &e2, selector) != 0)
3002 goto fail;
3003 if (!(e2 & DESC_S_MASK))
3004 goto fail;
3005 rpl = selector & 3;
3006 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3007 cpl = env->hflags & HF_CPL_MASK;
3008 if (e2 & DESC_CS_MASK) {
3009 goto fail;
3010 } else {
3011 if (dpl < cpl || dpl < rpl)
3012 goto fail;
3013 if (!(e2 & DESC_W_MASK)) {
3014 fail:
3015 CC_SRC = eflags & ~CC_Z;
3016 return;
3019 CC_SRC = eflags | CC_Z;
3022 /* FPU helpers */
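/* The FPU stack is fpregs[8] plus a top-of-stack index (fpstt); a push
   decrements fpstt modulo 8 and clears the corresponding fptags entry
   to mark the register as valid, a pop tags it empty again. */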
3024 void helper_fldt_ST0_A0(void)
3026 int new_fpstt;
3027 new_fpstt = (env->fpstt - 1) & 7;
3028 env->fpregs[new_fpstt].d = helper_fldt(A0);
3029 env->fpstt = new_fpstt;
3030 env->fptags[new_fpstt] = 0; /* validate stack entry */
3033 void helper_fstt_ST0_A0(void)
3035 helper_fstt(ST0, A0);
3038 void fpu_set_exception(int mask)
3040 env->fpus |= mask;
3041 if (env->fpus & (~env->fpuc & FPUC_EM))
3042 env->fpus |= FPUS_SE | FPUS_B;
3045 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3047 if (b == 0.0)
3048 fpu_set_exception(FPUS_ZE);
3049 return a / b;
3052 void fpu_raise_exception(void)
3054 if (env->cr[0] & CR0_NE_MASK) {
3055 raise_exception(EXCP10_COPR);
3057 #if !defined(CONFIG_USER_ONLY)
3058 else {
3059 cpu_set_ferr(env);
3061 #endif
3064 /* BCD ops */
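/* FBLD/FBSTP use the x87 10-byte packed BCD format: bytes 0-8 hold 18
   decimal digits (two per byte, byte 0 least significant), bit 7 of
   byte 9 is the sign.  E.g. bytes 45 23 01 00 ... encode 12345. */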
3066 void helper_fbld_ST0_A0(void)
3068 CPU86_LDouble tmp;
3069 uint64_t val;
3070 unsigned int v;
3071 int i;
3073 val = 0;
3074 for(i = 8; i >= 0; i--) {
3075 v = ldub(A0 + i);
3076 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3078 tmp = val;
3079 if (ldub(A0 + 9) & 0x80)
3080 tmp = -tmp;
3081 fpush();
3082 ST0 = tmp;
3085 void helper_fbst_ST0_A0(void)
3087 int v;
3088 target_ulong mem_ref, mem_end;
3089 int64_t val;
3091 val = floatx_to_int64(ST0, &env->fp_status);
3092 mem_ref = A0;
3093 mem_end = mem_ref + 9;
3094 if (val < 0) {
3095 stb(mem_end, 0x80);
3096 val = -val;
3097 } else {
3098 stb(mem_end, 0x00);
3100 while (mem_ref < mem_end) {
3101 if (val == 0)
3102 break;
3103 v = val % 100;
3104 val = val / 100;
3105 v = ((v / 10) << 4) | (v % 10);
3106 stb(mem_ref++, v);
3108 while (mem_ref < mem_end) {
3109 stb(mem_ref++, 0);
3113 void helper_f2xm1(void)
3115 ST0 = pow(2.0,ST0) - 1.0;
3118 void helper_fyl2x(void)
3120 CPU86_LDouble fptemp;
3122 fptemp = ST0;
3123 if (fptemp>0.0){
3124 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3125 ST1 *= fptemp;
3126 fpop();
3127 } else {
3128 env->fpus &= (~0x4700);
3129 env->fpus |= 0x400;
3133 void helper_fptan(void)
3135 CPU86_LDouble fptemp;
3137 fptemp = ST0;
3138 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3139 env->fpus |= 0x400;
3140 } else {
3141 ST0 = tan(fptemp);
3142 fpush();
3143 ST0 = 1.0;
3144 env->fpus &= (~0x400); /* C2 <-- 0 */
3145 /* the above code is for |arg| < 2**52 only */
3149 void helper_fpatan(void)
3151 CPU86_LDouble fptemp, fpsrcop;
3153 fpsrcop = ST1;
3154 fptemp = ST0;
3155 ST1 = atan2(fpsrcop,fptemp);
3156 fpop();
3159 void helper_fxtract(void)
3161 CPU86_LDoubleU temp;
3162 unsigned int expdif;
3164 temp.d = ST0;
3165 expdif = EXPD(temp) - EXPBIAS;
3166 /* DP exponent bias */
3167 ST0 = expdif;
3168 fpush();
3169 BIASEXPONENT(temp);
3170 ST0 = temp.d;
3173 void helper_fprem1(void)
3175 CPU86_LDouble dblq, fpsrcop, fptemp;
3176 CPU86_LDoubleU fpsrcop1, fptemp1;
3177 int expdif;
3178 signed long long int q;
3180 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3181 ST0 = 0.0 / 0.0; /* NaN */
3182 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3183 return;
3186 fpsrcop = ST0;
3187 fptemp = ST1;
3188 fpsrcop1.d = fpsrcop;
3189 fptemp1.d = fptemp;
3190 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3192 if (expdif < 0) {
3193 /* optimisation? taken from the AMD docs */
3194 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3195 /* ST0 is unchanged */
3196 return;
3199 if (expdif < 53) {
3200 dblq = fpsrcop / fptemp;
3201 /* round dblq towards nearest integer */
3202 dblq = rint(dblq);
3203 ST0 = fpsrcop - fptemp * dblq;
3205 /* convert dblq to q by truncating towards zero */
3206 if (dblq < 0.0)
3207 q = (signed long long int)(-dblq);
3208 else
3209 q = (signed long long int)dblq;
3211 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3212 /* (C0,C3,C1) <-- (q2,q1,q0) */
3213 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3214 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3215 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3216 } else {
3217 env->fpus |= 0x400; /* C2 <-- 1 */
3218 fptemp = pow(2.0, expdif - 50);
3219 fpsrcop = (ST0 / ST1) / fptemp;
3220 /* fpsrcop = integer obtained by chopping */
3221 fpsrcop = (fpsrcop < 0.0) ?
3222 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3223 ST0 -= (ST1 * fpsrcop * fptemp);
3227 void helper_fprem(void)
3229 CPU86_LDouble dblq, fpsrcop, fptemp;
3230 CPU86_LDoubleU fpsrcop1, fptemp1;
3231 int expdif;
3232 signed long long int q;
3234 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3235 ST0 = 0.0 / 0.0; /* NaN */
3236 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3237 return;
3240 fpsrcop = (CPU86_LDouble)ST0;
3241 fptemp = (CPU86_LDouble)ST1;
3242 fpsrcop1.d = fpsrcop;
3243 fptemp1.d = fptemp;
3244 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3246 if (expdif < 0) {
3247 /* optimisation? taken from the AMD docs */
3248 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3249 /* ST0 is unchanged */
3250 return;
3253 if ( expdif < 53 ) {
3254 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3255 /* round dblq towards zero */
3256 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3257 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3259 /* convert dblq to q by truncating towards zero */
3260 if (dblq < 0.0)
3261 q = (signed long long int)(-dblq);
3262 else
3263 q = (signed long long int)dblq;
3265 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3266 /* (C0,C3,C1) <-- (q2,q1,q0) */
3267 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3268 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3269 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3270 } else {
3271 int N = 32 + (expdif % 32); /* as per AMD docs */
3272 env->fpus |= 0x400; /* C2 <-- 1 */
3273 fptemp = pow(2.0, (double)(expdif - N));
3274 fpsrcop = (ST0 / ST1) / fptemp;
3275 /* fpsrcop = integer obtained by chopping */
3276 fpsrcop = (fpsrcop < 0.0) ?
3277 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3278 ST0 -= (ST1 * fpsrcop * fptemp);
3282 void helper_fyl2xp1(void)
3284 CPU86_LDouble fptemp;
3286 fptemp = ST0;
3287 if ((fptemp+1.0)>0.0) {
3288 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3289 ST1 *= fptemp;
3290 fpop();
3291 } else {
3292 env->fpus &= (~0x4700);
3293 env->fpus |= 0x400;
3297 void helper_fsqrt(void)
3299 CPU86_LDouble fptemp;
3301 fptemp = ST0;
3302 if (fptemp<0.0) {
3303 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3304 env->fpus |= 0x400;
3306 ST0 = sqrt(fptemp);
3309 void helper_fsincos(void)
3311 CPU86_LDouble fptemp;
3313 fptemp = ST0;
3314 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3315 env->fpus |= 0x400;
3316 } else {
3317 ST0 = sin(fptemp);
3318 fpush();
3319 ST0 = cos(fptemp);
3320 env->fpus &= (~0x400); /* C2 <-- 0 */
3321 /* the above code is for |arg| < 2**63 only */
3325 void helper_frndint(void)
3327 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3330 void helper_fscale(void)
3332 ST0 = ldexp (ST0, (int)(ST1));
3335 void helper_fsin(void)
3337 CPU86_LDouble fptemp;
3339 fptemp = ST0;
3340 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3341 env->fpus |= 0x400;
3342 } else {
3343 ST0 = sin(fptemp);
3344 env->fpus &= (~0x400); /* C2 <-- 0 */
3345 /* the above code is for |arg| < 2**53 only */
3349 void helper_fcos(void)
3351 CPU86_LDouble fptemp;
3353 fptemp = ST0;
3354 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3355 env->fpus |= 0x400;
3356 } else {
3357 ST0 = cos(fptemp);
3358 env->fpus &= (~0x400); /* C2 <-- 0 */
3359 /* the above code is for |arg| < 2**63 only */
3363 void helper_fxam_ST0(void)
3365 CPU86_LDoubleU temp;
3366 int expdif;
3368 temp.d = ST0;
3370 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3371 if (SIGND(temp))
3372 env->fpus |= 0x200; /* C1 <-- 1 */
3374 /* XXX: test fptags too */
3375 expdif = EXPD(temp);
3376 if (expdif == MAXEXPD) {
3377 #ifdef USE_X86LDOUBLE
3378 if (MANTD(temp) == 0x8000000000000000ULL)
3379 #else
3380 if (MANTD(temp) == 0)
3381 #endif
3382 env->fpus |= 0x500 /*Infinity*/;
3383 else
3384 env->fpus |= 0x100 /*NaN*/;
3385 } else if (expdif == 0) {
3386 if (MANTD(temp) == 0)
3387 env->fpus |= 0x4000 /*Zero*/;
3388 else
3389 env->fpus |= 0x4400 /*Denormal*/;
3390 } else {
3391 env->fpus |= 0x400;
3395 void helper_fstenv(target_ulong ptr, int data32)
3397 int fpus, fptag, exp, i;
3398 uint64_t mant;
3399 CPU86_LDoubleU tmp;
3401 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3402 fptag = 0;
3403 for (i=7; i>=0; i--) {
3404 fptag <<= 2;
3405 if (env->fptags[i]) {
3406 fptag |= 3;
3407 } else {
3408 tmp.d = env->fpregs[i].d;
3409 exp = EXPD(tmp);
3410 mant = MANTD(tmp);
3411 if (exp == 0 && mant == 0) {
3412 /* zero */
3413 fptag |= 1;
3414 } else if (exp == 0 || exp == MAXEXPD
3415 #ifdef USE_X86LDOUBLE
3416 || (mant & (1LL << 63)) == 0
3417 #endif
3419 /* NaNs, infinity, denormal */
3420 fptag |= 2;
3424 if (data32) {
3425 /* 32 bit */
3426 stl(ptr, env->fpuc);
3427 stl(ptr + 4, fpus);
3428 stl(ptr + 8, fptag);
3429 stl(ptr + 12, 0); /* fpip */
3430 stl(ptr + 16, 0); /* fpcs */
3431 stl(ptr + 20, 0); /* fpoo */
3432 stl(ptr + 24, 0); /* fpos */
3433 } else {
3434 /* 16 bit */
3435 stw(ptr, env->fpuc);
3436 stw(ptr + 2, fpus);
3437 stw(ptr + 4, fptag);
3438 stw(ptr + 6, 0);
3439 stw(ptr + 8, 0);
3440 stw(ptr + 10, 0);
3441 stw(ptr + 12, 0);
3445 void helper_fldenv(target_ulong ptr, int data32)
3447 int i, fpus, fptag;
3449 if (data32) {
3450 env->fpuc = lduw(ptr);
3451 fpus = lduw(ptr + 4);
3452 fptag = lduw(ptr + 8);
3454 else {
3455 env->fpuc = lduw(ptr);
3456 fpus = lduw(ptr + 2);
3457 fptag = lduw(ptr + 4);
3459 env->fpstt = (fpus >> 11) & 7;
3460 env->fpus = fpus & ~0x3800;
3461 for(i = 0;i < 8; i++) {
3462 env->fptags[i] = ((fptag & 3) == 3);
3463 fptag >>= 2;
3467 void helper_fsave(target_ulong ptr, int data32)
3469 CPU86_LDouble tmp;
3470 int i;
3472 helper_fstenv(ptr, data32);
3474 ptr += (14 << data32);
3475 for(i = 0;i < 8; i++) {
3476 tmp = ST(i);
3477 helper_fstt(tmp, ptr);
3478 ptr += 10;
3481 /* fninit */
3482 env->fpus = 0;
3483 env->fpstt = 0;
3484 env->fpuc = 0x37f;
3485 env->fptags[0] = 1;
3486 env->fptags[1] = 1;
3487 env->fptags[2] = 1;
3488 env->fptags[3] = 1;
3489 env->fptags[4] = 1;
3490 env->fptags[5] = 1;
3491 env->fptags[6] = 1;
3492 env->fptags[7] = 1;
3495 void helper_frstor(target_ulong ptr, int data32)
3497 CPU86_LDouble tmp;
3498 int i;
3500 helper_fldenv(ptr, data32);
3501 ptr += (14 << data32);
3503 for(i = 0;i < 8; i++) {
3504 tmp = helper_fldt(ptr);
3505 ST(i) = tmp;
3506 ptr += 10;
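/* FXSAVE/FXRSTOR use the fixed 512-byte area: FCW at +0, FSW at +2, an
   abridged tag word at +4 (one bit per register, set when the register
   is valid, hence the ^ 0xff against QEMU's 'empty' flags), MXCSR at
   +0x18, the ST registers from +0x20 and the XMM registers from +0xa0,
   16 bytes per slot. */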
3510 void helper_fxsave(target_ulong ptr, int data64)
3512 int fpus, fptag, i, nb_xmm_regs;
3513 CPU86_LDouble tmp;
3514 target_ulong addr;
3516 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3517 fptag = 0;
3518 for(i = 0; i < 8; i++) {
3519 fptag |= (env->fptags[i] << i);
3521 stw(ptr, env->fpuc);
3522 stw(ptr + 2, fpus);
3523 stw(ptr + 4, fptag ^ 0xff);
3525 addr = ptr + 0x20;
3526 for(i = 0;i < 8; i++) {
3527 tmp = ST(i);
3528 helper_fstt(tmp, addr);
3529 addr += 16;
3532 if (env->cr[4] & CR4_OSFXSR_MASK) {
3533 /* XXX: finish it */
3534 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3535 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3536 nb_xmm_regs = 8 << data64;
3537 addr = ptr + 0xa0;
3538 for(i = 0; i < nb_xmm_regs; i++) {
3539 stq(addr, env->xmm_regs[i].XMM_Q(0));
3540 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3541 addr += 16;
3546 void helper_fxrstor(target_ulong ptr, int data64)
3548 int i, fpus, fptag, nb_xmm_regs;
3549 CPU86_LDouble tmp;
3550 target_ulong addr;
3552 env->fpuc = lduw(ptr);
3553 fpus = lduw(ptr + 2);
3554 fptag = lduw(ptr + 4);
3555 env->fpstt = (fpus >> 11) & 7;
3556 env->fpus = fpus & ~0x3800;
3557 fptag ^= 0xff;
3558 for(i = 0;i < 8; i++) {
3559 env->fptags[i] = ((fptag >> i) & 1);
3562 addr = ptr + 0x20;
3563 for(i = 0;i < 8; i++) {
3564 tmp = helper_fldt(addr);
3565 ST(i) = tmp;
3566 addr += 16;
3569 if (env->cr[4] & CR4_OSFXSR_MASK) {
3570 /* XXX: finish it */
3571 env->mxcsr = ldl(ptr + 0x18);
3572 //ldl(ptr + 0x1c);
3573 nb_xmm_regs = 8 << data64;
3574 addr = ptr + 0xa0;
3575 for(i = 0; i < nb_xmm_regs; i++) {
3576 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3577 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3578 addr += 16;
3583 #ifndef USE_X86LDOUBLE
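/* Without USE_X86LDOUBLE, CPU86_LDouble is an IEEE double, so the
   80-bit memory format is converted by hand: the 52-bit mantissa is
   shifted up to 63 bits with the explicit integer bit set, and the
   exponent is rebiased from 1023 (EXPBIAS) to 16383. */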
3585 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3587 CPU86_LDoubleU temp;
3588 int e;
3590 temp.d = f;
3591 /* mantissa */
3592 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3593 /* exponent + sign */
3594 e = EXPD(temp) - EXPBIAS + 16383;
3595 e |= SIGND(temp) >> 16;
3596 *pexp = e;
3599 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3601 CPU86_LDoubleU temp;
3602 int e;
3603 uint64_t ll;
3605 /* XXX: handle overflow ? */
3606 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3607 e |= (upper >> 4) & 0x800; /* sign */
3608 ll = (mant >> 11) & ((1LL << 52) - 1);
3609 #ifdef __arm__
3610 temp.l.upper = (e << 20) | (ll >> 32);
3611 temp.l.lower = ll;
3612 #else
3613 temp.ll = ll | ((uint64_t)e << 52);
3614 #endif
3615 return temp.d;
3618 #else
3620 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3622 CPU86_LDoubleU temp;
3624 temp.d = f;
3625 *pmant = temp.l.lower;
3626 *pexp = temp.l.upper;
3629 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3631 CPU86_LDoubleU temp;
3633 temp.l.upper = upper;
3634 temp.l.lower = mant;
3635 return temp.d;
3637 #endif
3639 #ifdef TARGET_X86_64
3641 //#define DEBUG_MULDIV
3643 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3645 *plow += a;
3646 /* carry test */
3647 if (*plow < a)
3648 (*phigh)++;
3649 *phigh += b;
3652 static void neg128(uint64_t *plow, uint64_t *phigh)
3654 *plow = ~ *plow;
3655 *phigh = ~ *phigh;
3656 add128(plow, phigh, 1, 0);
3659 /* return TRUE if overflow */
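/* 128 / 64 -> 64 bit unsigned division using a simple one-bit-per-step
   shift-and-subtract loop.  Overflow (quotient does not fit in 64 bits)
   is reported when the high half is >= the divisor.  Example:
   plow = 0, phigh = 1 (i.e. 2^64) divided by 0x10 gives plow = 2^60,
   phigh = 0. */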
3660 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3662 uint64_t q, r, a1, a0;
3663 int i, qb, ab;
3665 a0 = *plow;
3666 a1 = *phigh;
3667 if (a1 == 0) {
3668 q = a0 / b;
3669 r = a0 % b;
3670 *plow = q;
3671 *phigh = r;
3672 } else {
3673 if (a1 >= b)
3674 return 1;
3675 /* XXX: use a better algorithm */
3676 for(i = 0; i < 64; i++) {
3677 ab = a1 >> 63;
3678 a1 = (a1 << 1) | (a0 >> 63);
3679 if (ab || a1 >= b) {
3680 a1 -= b;
3681 qb = 1;
3682 } else {
3683 qb = 0;
3685 a0 = (a0 << 1) | qb;
3687 #if defined(DEBUG_MULDIV)
3688 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3689 *phigh, *plow, b, a0, a1);
3690 #endif
3691 *plow = a0;
3692 *phigh = a1;
3694 return 0;
3697 /* return TRUE if overflow */
3698 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3700 int sa, sb;
3701 sa = ((int64_t)*phigh < 0);
3702 if (sa)
3703 neg128(plow, phigh);
3704 sb = (b < 0);
3705 if (sb)
3706 b = -b;
3707 if (div64(plow, phigh, b) != 0)
3708 return 1;
3709 if (sa ^ sb) {
3710 if (*plow > (1ULL << 63))
3711 return 1;
3712 *plow = - *plow;
3713 } else {
3714 if (*plow >= (1ULL << 63))
3715 return 1;
3717 if (sa)
3718 *phigh = - *phigh;
3719 return 0;
3722 void helper_mulq_EAX_T0(void)
3724 uint64_t r0, r1;
3726 mulu64(&r1, &r0, EAX, T0);
3727 EAX = r0;
3728 EDX = r1;
3729 CC_DST = r0;
3730 CC_SRC = r1;
3733 void helper_imulq_EAX_T0(void)
3735 uint64_t r0, r1;
3737 muls64(&r1, &r0, EAX, T0);
3738 EAX = r0;
3739 EDX = r1;
3740 CC_DST = r0;
3741 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3744 void helper_imulq_T0_T1(void)
3746 uint64_t r0, r1;
3748 muls64(&r1, &r0, T0, T1);
3749 T0 = r0;
3750 CC_DST = r0;
3751 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3754 void helper_divq_EAX_T0(void)
3756 uint64_t r0, r1;
3757 if (T0 == 0) {
3758 raise_exception(EXCP00_DIVZ);
3760 r0 = EAX;
3761 r1 = EDX;
3762 if (div64(&r0, &r1, T0))
3763 raise_exception(EXCP00_DIVZ);
3764 EAX = r0;
3765 EDX = r1;
3768 void helper_idivq_EAX_T0(void)
3770 uint64_t r0, r1;
3771 if (T0 == 0) {
3772 raise_exception(EXCP00_DIVZ);
3774 r0 = EAX;
3775 r1 = EDX;
3776 if (idiv64(&r0, &r1, T0))
3777 raise_exception(EXCP00_DIVZ);
3778 EAX = r0;
3779 EDX = r1;
3782 void helper_bswapq_T0(void)
3784 T0 = bswap64(T0);
3786 #endif
3788 void helper_hlt(void)
3790 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3791 env->hflags |= HF_HALTED_MASK;
3792 env->exception_index = EXCP_HLT;
3793 cpu_loop_exit();
3796 void helper_monitor(void)
3798 if ((uint32_t)ECX != 0)
3799 raise_exception(EXCP0D_GPF);
3800 /* XXX: store address ? */
3803 void helper_mwait(void)
3805 if ((uint32_t)ECX != 0)
3806 raise_exception(EXCP0D_GPF);
3807 /* XXX: not complete but not completely erroneous */
3808 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3809 /* more than one CPU: do not sleep because another CPU may
3810 wake this one */
3811 } else {
3812 helper_hlt();
3816 float approx_rsqrt(float a)
3818 return 1.0 / sqrt(a);
3821 float approx_rcp(float a)
3823 return 1.0 / a;
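/* Propagate the x87 control word into the softfloat status: bits 10-11
   (RC) select the rounding mode and, when FLOATX80 is available,
   bits 8-9 (PC) select the 32/64/80-bit rounding precision. */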
3826 void update_fp_status(void)
3828 int rnd_type;
3830 /* set rounding mode */
3831 switch(env->fpuc & RC_MASK) {
3832 default:
3833 case RC_NEAR:
3834 rnd_type = float_round_nearest_even;
3835 break;
3836 case RC_DOWN:
3837 rnd_type = float_round_down;
3838 break;
3839 case RC_UP:
3840 rnd_type = float_round_up;
3841 break;
3842 case RC_CHOP:
3843 rnd_type = float_round_to_zero;
3844 break;
3846 set_float_rounding_mode(rnd_type, &env->fp_status);
3847 #ifdef FLOATX80
3848 switch((env->fpuc >> 8) & 3) {
3849 case 0:
3850 rnd_type = 32;
3851 break;
3852 case 2:
3853 rnd_type = 64;
3854 break;
3855 case 3:
3856 default:
3857 rnd_type = 80;
3858 break;
3860 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3861 #endif
3864 #if !defined(CONFIG_USER_ONLY)
3866 #define MMUSUFFIX _mmu
3867 #define GETPC() (__builtin_return_address(0))
3869 #define SHIFT 0
3870 #include "softmmu_template.h"
3872 #define SHIFT 1
3873 #include "softmmu_template.h"
3875 #define SHIFT 2
3876 #include "softmmu_template.h"
3878 #define SHIFT 3
3879 #include "softmmu_template.h"
3881 #endif
3883 /* try to fill the TLB and raise an exception on error. If retaddr is
3884 NULL, it means that the function was called in C code (i.e. not
3885 from generated code or from helper.c) */
3886 /* XXX: fix it to restore all registers */
3887 void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
3889 TranslationBlock *tb;
3890 int ret;
3891 unsigned long pc;
3892 CPUX86State *saved_env;
3894 /* XXX: hack to restore env in all cases, even if not called from
3895 generated code */
3896 saved_env = env;
3897 env = cpu_single_env;
3899 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
3900 if (ret) {
3901 if (retaddr) {
3902 /* now we have a real cpu fault */
3903 pc = (unsigned long)retaddr;
3904 tb = tb_find_pc(pc);
3905 if (tb) {
3906 /* the PC is inside the translated code. It means that we have
3907 a virtual CPU fault */
3908 cpu_restore_state(tb, env, pc, NULL);
3911 if (retaddr)
3912 raise_exception_err(env->exception_index, env->error_code);
3913 else
3914 raise_exception_err_norestore(env->exception_index, env->error_code);
3916 env = saved_env;
3920 /* Secure Virtual Machine helpers */
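/* AMD SVM helpers.  helper_vmrun saves the host state into the hsave
   area (set up via the VM_HSAVE_PA MSR), loads the guest state and the
   intercept bitmaps from the VMCB at 'addr', optionally injects a
   pending event and enters the guest; vmexit() stores the guest state
   back into the VMCB when an intercepted event occurs. */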
3922 void helper_stgi(void)
3924 env->hflags |= HF_GIF_MASK;
3927 void helper_clgi(void)
3929 env->hflags &= ~HF_GIF_MASK;
3932 #if defined(CONFIG_USER_ONLY)
3934 void helper_vmrun(target_ulong addr) { }
3935 void helper_vmmcall(void) { }
3936 void helper_vmload(target_ulong addr) { }
3937 void helper_vmsave(target_ulong addr) { }
3938 void helper_skinit(void) { }
3939 void helper_invlpga(void) { }
3940 void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
3941 int svm_check_intercept_param(uint32_t type, uint64_t param)
3943 return 0;
3946 #else
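/* SVM keeps segment attributes in a compressed 12-bit form (type/S/DPL/P
   in bits 0-7, AVL/L/DB/G in bits 8-11); vmcb2cpu_attrib rebuilds a raw
   descriptor high word (e2) from the attribute field plus the base and
   limit bits, and cpu2vmcb_attrib extracts the attribute field again. */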
3948 static inline uint32_t
3949 vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
3951 return ((vmcb_attrib & 0x00ff) << 8) /* Type, S, DPL, P */
3952 | ((vmcb_attrib & 0x0f00) << 12) /* AVL, L, DB, G */
3953 | ((vmcb_base >> 16) & 0xff) /* Base 23-16 */
3954 | (vmcb_base & 0xff000000) /* Base 31-24 */
3955 | (vmcb_limit & 0xf0000); /* Limit 19-16 */
3958 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
3960 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
3961 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
3964 extern uint8_t *phys_ram_base;
3965 void helper_vmrun(target_ulong addr)
3967 uint32_t event_inj;
3968 uint32_t int_ctl;
3970 if (loglevel & CPU_LOG_TB_IN_ASM)
3971 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
3973 env->vm_vmcb = addr;
3974 regs_to_env();
3976 /* save the current CPU state in the hsave page */
3977 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
3978 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
3980 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
3981 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
3983 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
3984 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
3985 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
3986 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
3987 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
3988 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
3989 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
3991 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
3992 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
3994 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
3995 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
3996 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
3997 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
3999 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4000 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4001 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4003 /* load the interception bitmaps so we do not need to access the
4004 vmcb in svm mode */
4005 /* We shift all the intercept bits so we can OR them with the TB
4006 flags later on */
4007 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4008 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4009 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4010 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4011 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4012 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4014 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4015 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4017 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4018 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4020 /* clear exit_info_2 so we behave like the real hardware */
4021 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4023 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4024 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4025 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4026 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4027 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4028 if (int_ctl & V_INTR_MASKING_MASK) {
4029 env->cr[8] = int_ctl & V_TPR_MASK;
4030 if (env->eflags & IF_MASK)
4031 env->hflags |= HF_HIF_MASK;
4034 #ifdef TARGET_X86_64
4035 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4036 env->hflags &= ~HF_LMA_MASK;
4037 if (env->efer & MSR_EFER_LMA)
4038 env->hflags |= HF_LMA_MASK;
4039 #endif
4040 env->eflags = 0;
4041 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4042 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4043 CC_OP = CC_OP_EFLAGS;
4044 CC_DST = 0xffffffff;
4046 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4047 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4048 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4049 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4051 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4052 env->eip = EIP;
4053 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4054 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4055 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4056 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4057 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4059 /* FIXME: guest state consistency checks */
4061 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4062 case TLB_CONTROL_DO_NOTHING:
4063 break;
4064 case TLB_CONTROL_FLUSH_ALL_ASID:
4065 /* FIXME: this is not 100% correct but should work for now */
4066 tlb_flush(env, 1);
4067 break;
4070 helper_stgi();
4072 regs_to_env();
4074 /* maybe we need to inject an event */
4075 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4076 if (event_inj & SVM_EVTINJ_VALID) {
4077 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4078 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4079 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4080 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4082 if (loglevel & CPU_LOG_TB_IN_ASM)
4083 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4084 /* FIXME: need to implement valid_err */
4085 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4086 case SVM_EVTINJ_TYPE_INTR:
4087 env->exception_index = vector;
4088 env->error_code = event_inj_err;
4089 env->exception_is_int = 1;
4090 env->exception_next_eip = -1;
4091 if (loglevel & CPU_LOG_TB_IN_ASM)
4092 fprintf(logfile, "INTR");
4093 break;
4094 case SVM_EVTINJ_TYPE_NMI:
4095 env->exception_index = vector;
4096 env->error_code = event_inj_err;
4097 env->exception_is_int = 1;
4098 env->exception_next_eip = EIP;
4099 if (loglevel & CPU_LOG_TB_IN_ASM)
4100 fprintf(logfile, "NMI");
4101 break;
4102 case SVM_EVTINJ_TYPE_EXEPT:
4103 env->exception_index = vector;
4104 env->error_code = event_inj_err;
4105 env->exception_is_int = 0;
4106 env->exception_next_eip = -1;
4107 if (loglevel & CPU_LOG_TB_IN_ASM)
4108 fprintf(logfile, "EXEPT");
4109 break;
4110 case SVM_EVTINJ_TYPE_SOFT:
4111 env->exception_index = vector;
4112 env->error_code = event_inj_err;
4113 env->exception_is_int = 1;
4114 env->exception_next_eip = EIP;
4115 if (loglevel & CPU_LOG_TB_IN_ASM)
4116 fprintf(logfile, "SOFT");
4117 break;
4119 if (loglevel & CPU_LOG_TB_IN_ASM)
4120 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4122 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4123 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4126 cpu_loop_exit();
4129 void helper_vmmcall(void)
4131 if (loglevel & CPU_LOG_TB_IN_ASM)
4132 fprintf(logfile,"vmmcall!\n");
4135 void helper_vmload(target_ulong addr)
4137 if (loglevel & CPU_LOG_TB_IN_ASM)
4138 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4139 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4140 env->segs[R_FS].base);
4142 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4143 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4144 SVM_LOAD_SEG2(addr, tr, tr);
4145 SVM_LOAD_SEG2(addr, ldt, ldtr);
4147 #ifdef TARGET_X86_64
4148 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4149 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4150 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4151 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4152 #endif
4153 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4154 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4155 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4156 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4159 void helper_vmsave(target_ulong addr)
4161 if (loglevel & CPU_LOG_TB_IN_ASM)
4162 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4163 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4164 env->segs[R_FS].base);
4166 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4167 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4168 SVM_SAVE_SEG(addr, tr, tr);
4169 SVM_SAVE_SEG(addr, ldt, ldtr);
4171 #ifdef TARGET_X86_64
4172 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4173 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4174 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4175 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4176 #endif
4177 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4178 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4179 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4180 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4183 void helper_skinit(void)
4185 if (loglevel & CPU_LOG_TB_IN_ASM)
4186 fprintf(logfile,"skinit!\n");
4189 void helper_invlpga(void)
4191 tlb_flush(env, 0);
4194 int svm_check_intercept_param(uint32_t type, uint64_t param)
4196 switch(type) {
4197 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4198 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4199 vmexit(type, param);
4200 return 1;
4201 }
4202 break;
4203 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4204 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4205 vmexit(type, param);
4206 return 1;
4207 }
4208 break;
4209 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4210 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4211 vmexit(type, param);
4212 return 1;
4213 }
4214 break;
4215 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4216 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4217 vmexit(type, param);
4218 return 1;
4219 }
4220 break;
4221 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4222 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4223 vmexit(type, param);
4224 return 1;
4225 }
4226 break;
4227 case SVM_EXIT_IOIO:
4228 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4229 /* FIXME: this should be read in at vmrun (faster this way?) */
4230 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4231 uint16_t port = (uint16_t) (param >> 16);
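/* The I/O permission map holds one intercept bit per port; 'param' uses the
   SVM EXITINFO1 layout, with the port number in bits 16..31, so we test bit
   (port % 8) of byte (port / 8). E.g. port 0x3f8 maps to byte 0x7f, bit 0. */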
4233 if(ldub_phys(addr + port / 8) & (1 << (port % 8)))
4234 vmexit(type, param);
4235 }
4236 break;
4238 case SVM_EXIT_MSR:
4239 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4240 /* FIXME: this should be read in at vmrun (faster this way?) */
4241 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
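/* The MSR permission map uses two bits per MSR (read then write intercept)
   and is split into 2K regions for MSRs 0x0-0x1fff, 0xc0000000-0xc0001fff
   and 0xc0010000-0xc0011fff. Below, T1 becomes the byte offset into the map
   and T0 the bit offset within that byte; e.g. EFER (0xc0000080) lands at
   byte 0x820, bit 0. */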
4242 switch((uint32_t)ECX) {
4243 case 0 ... 0x1fff:
4244 T0 = (ECX * 2) % 8;
4245 T1 = (ECX * 2) / 8;
4246 break;
4247 case 0xc0000000 ... 0xc0001fff:
4248 T0 = (8192 + ECX - 0xc0000000) * 2;
4249 T1 = (T0 / 8);
4250 T0 %= 8;
4251 break;
4252 case 0xc0010000 ... 0xc0011fff:
4253 T0 = (16384 + ECX - 0xc0010000) * 2;
4254 T1 = (T0 / 8);
4255 T0 %= 8;
4256 break;
4257 default:
4258 vmexit(type, param);
4259 return 1;
4260 }
4261 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4262 vmexit(type, param);
4263 return 1;
4264 }
4265 break;
4266 default:
4267 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
4268 vmexit(type, param);
4269 return 1;
4270 }
4271 break;
4272 }
4273 return 0;
4274 }
4276 void vmexit(uint64_t exit_code, uint64_t exit_info_1)
4277 {
4278 uint32_t int_ctl;
4280 if (loglevel & CPU_LOG_TB_IN_ASM)
4281 fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
4282 exit_code, exit_info_1,
4283 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
4284 EIP);
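/* If the guest was still in an instruction-long interrupt shadow (e.g. right
   after MOV SS or STI), report it in the VMCB int_state field so the host
   hypervisor can see and re-establish it; otherwise clear the field. */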
4286 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
4287 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
4288 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4289 } else {
4290 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4291 }
4293 /* Save the VM state in the vmcb */
4294 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
4295 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
4296 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
4297 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
4299 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4300 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4302 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4303 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4305 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4306 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4307 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4308 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4309 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
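/* With V_INTR_MASKING enabled the guest's CR8 is virtualized through V_TPR,
   so copy the current CR8 value back into the V_TPR bits of int_ctl before
   returning control to the host. */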
4311 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
4312 int_ctl &= ~V_TPR_MASK;
4313 int_ctl |= env->cr[8] & V_TPR_MASK;
4314 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
4315 }
4317 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
4318 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4319 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4320 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4321 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4322 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4323 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
4325 /* Reload the host state from vm_hsave */
4326 env->hflags &= ~HF_HIF_MASK;
4327 env->intercept = 0;
4328 env->intercept_exceptions = 0;
4329 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
4331 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
4332 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
4334 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
4335 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
4337 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
4338 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
4339 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
4340 if (int_ctl & V_INTR_MASKING_MASK)
4341 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
4342 /* we need to set the efer after the crs so the hidden flags get set properly */
4343 #ifdef TARGET_X86_64
4344 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
4345 env->hflags &= ~HF_LMA_MASK;
4346 if (env->efer & MSR_EFER_LMA)
4347 env->hflags |= HF_LMA_MASK;
4348 #endif
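/* Rebuild eflags from the host save area. The arithmetic flags (O/S/Z/A/P/C)
   and DF are kept in QEMU's lazy CC_SRC/DF form, so they are masked out of
   the direct eflags update and CC_OP is switched to CC_OP_EFLAGS instead. */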
4350 env->eflags = 0;
4351 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
4352 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4353 CC_OP = CC_OP_EFLAGS;
4355 SVM_LOAD_SEG(env->vm_hsave, ES, es);
4356 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
4357 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
4358 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
4360 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4361 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4362 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4364 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4365 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
4367 /* other setups */
4368 cpu_x86_set_cpl(env, 0);
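/* EXITCODE is a 64-bit VMCB field; store it as two 32-bit halves
   (exit_code_hi / exit_code), then record EXITINFO1 for the host. */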
4369 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
4370 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
4371 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
4373 helper_clgi();
4374 /* FIXME: Resets the current ASID register to zero (host ASID). */
4376 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4378 /* Clears the TSC_OFFSET inside the processor. */
4380 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4381 from the page table indicated by the host's CR3. If the PDPEs contain
4382 illegal state, the processor causes a shutdown. */
4384 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4385 env->cr[0] |= CR0_PE_MASK;
4386 env->eflags &= ~VM_MASK;
4388 /* Disables all breakpoints in the host DR7 register. */
4390 /* Checks the reloaded host state for consistency. */
4392 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4393 host's code segment or non-canonical (in the case of long mode), a
4394 #GP fault is delivered inside the host. */
4396 /* remove any pending exception */
4397 env->exception_index = -1;
4398 env->error_code = 0;
4399 env->old_exception = -1;
4401 regs_to_env();
4402 cpu_loop_exit();
4403 }
4405 #endif