Add new command line option for controlling shadow cache size
[qemu-kvm/fedora.git] / target-i386 / helper2.c
blob b0e969257366d09ba203e9d0f4a004cd326b8fe3
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"

#ifdef USE_KVM
#include "../qemu-kvm.h"
#endif
//#define DEBUG_MMU
#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
extern const char *cpu_vendor_string;

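/* Allocate and initialise a virtual x86 CPU.  A minimal usage sketch
 * (hypothetical caller; the real call site is the machine init code):
 *
 *     CPUX86State *env = cpu_x86_init();
 *     if (!env)
 *         exit(1);
 *
 * Returns NULL if the state structure cannot be allocated. */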
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
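        /* CPUID.1:EAX layout: stepping in bits [3:0], model in [7:4],
         * family in [11:8]; e.g. family 6, model 3, stepping 3
         * encodes as (6 << 8) | (3 << 4) | 3 = 0x0633. */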
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
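        /* Power-on PAT value: each byte selects one memory type per PAT
         * entry, low byte first: 06=WB, 04=WT, 07=UC-, 00=UC, with the
         * same four types repeated for entries 4-7. */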
        env->cpuid_ext3_features = CPUID_EXT3_SVM;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0x8000000e;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;

            if (cpu_vendor_string != NULL)
                model_id = cpu_vendor_string;

            len = strlen(model_id);
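            /* Pack the NUL-padded 48-byte model string into the twelve
             * 32-bit words returned by CPUID leaves 0x80000002-0x80000004,
             * four characters per word, least significant byte first. */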
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
#ifdef USE_KVM
    {
        extern int kvm_allowed;
        if (kvm_allowed) {
            kvm_qemu_init_env(env);
            env->ready_for_interrupt_injection = 1;
        }
    }
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

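    /* The reset segment state makes the first instruction fetch hit the
     * reset vector: CS.base 0xffff0000 plus EIP 0xfff0 is physical
     * 0xfffffff0, the top of the BIOS ROM. */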
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
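    /* fptags[i] == 1 marks stack slot i as empty; FCW 0x37f and MXCSR
     * 0x1f80 are the architectural power-on defaults with all exceptions
     * masked. */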
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */

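/* QEMU evaluates EFLAGS lazily: most translated instructions only record
 * their operands in cc_src/cc_dst and the operation kind in cc_op, and the
 * flags are materialised on demand.  The names below mirror the CC_OP_*
 * enumeration and are used when dumping that state. */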
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
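        /* fptag now holds the abridged tag word in the FXSAVE convention:
         * bit i set means physical register i is valid (non-empty). */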
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

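/* The A20 line, when disabled, forces physical address bit 20 to zero so
 * that addresses wrap at 1MB as on the 8086; env->a20_mask is ANDed into
 * every physical address computed by the MMU code below. */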
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

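    /* Long mode is entered by setting CR0.PG while EFER.LME is already set
     * (EFER.LMA then becomes 1) and left by clearing CR0.PG; the hflags
     * cache the resulting mode. */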
#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

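/* In PAE/long mode the page walk uses 9-bit indices: the PML4E index
 * comes from address bits 47-39, the PDPE from 38-30, the PDE from 29-21
 * and the PTE from 20-12; legacy 32-bit paging uses two 10-bit levels.
 * Each lookup below follows the same pattern: mask the table base, add
 * the scaled index, then apply a20_mask. */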
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;
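    /* Example: for a 4MB page with virt_addr 0x40000000, a fault at addr
     * 0x40203456 gives page_offset 0x203000, so only the 4KB page at
     * vaddr 0x40203000 (paddr = page base + 0x203000) enters the TLB. */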
    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}

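/* Walk the page tables without side effects (no accessed/dirty bit
 * updates, no faults raised); returns -1 if the address is unmapped.
 * QEMU uses this for debugger accesses such as the gdb stub. */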
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
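/* This struct mirrors the 32-bit protected-mode FSAVE/FRSTOR memory
 * image: a 28-byte control/status header followed by the eight 80-bit
 * data registers, 108 bytes in total. */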
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
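    /* env->fpregs[] is indexed by physical register number while the
     * FSAVE image stores ST(0)..ST(7), so rotate by the current top of
     * stack (fpstt) when copying. */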
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif