Sparc64 hypervisor mode
[qemu/qemu_0_9_1_stable.git] / target-i386 / helper2.c
blob7d5275c10127039279e40ee1c99c2cc4da369130
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
32 //#define DEBUG_MMU
34 #ifdef USE_CODE_COPY
35 #include <asm/ldt.h>
36 #include <linux/unistd.h>
37 #include <linux/version.h>
39 int modify_ldt(int func, void *ptr, unsigned long bytecount)
41 return syscall(__NR_modify_ldt, func, ptr, bytecount);
44 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
45 #define modify_ldt_ldt_s user_desc
46 #endif
47 #endif /* USE_CODE_COPY */
49 CPUX86State *cpu_x86_init(void)
51 CPUX86State *env;
52 static int inited;
54 env = qemu_mallocz(sizeof(CPUX86State));
55 if (!env)
56 return NULL;
57 cpu_exec_init(env);
59 /* init various static tables */
60 if (!inited) {
61 inited = 1;
62 optimize_flags_init();
64 #ifdef USE_CODE_COPY
65 /* testing code for code copy case */
67 struct modify_ldt_ldt_s ldt;
69 ldt.entry_number = 1;
70 ldt.base_addr = (unsigned long)env;
71 ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
72 ldt.seg_32bit = 1;
73 ldt.contents = MODIFY_LDT_CONTENTS_DATA;
74 ldt.read_exec_only = 0;
75 ldt.limit_in_pages = 1;
76 ldt.seg_not_present = 0;
77 ldt.useable = 1;
78 modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
80 asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
82 #endif
84 int family, model, stepping;
85 #ifdef TARGET_X86_64
86 env->cpuid_vendor1 = 0x68747541; /* "Auth" */
87 env->cpuid_vendor2 = 0x69746e65; /* "enti" */
88 env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
89 family = 6;
90 model = 2;
91 stepping = 3;
92 #else
93 env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
94 env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
95 env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
96 #if 0
97 /* pentium 75-200 */
98 family = 5;
99 model = 2;
100 stepping = 11;
101 #else
102 /* pentium pro */
103 family = 6;
104 model = 3;
105 stepping = 3;
106 #endif
107 #endif
108 env->cpuid_level = 2;
109 env->cpuid_version = (family << 8) | (model << 4) | stepping;
110 env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
111 CPUID_TSC | CPUID_MSR | CPUID_MCE |
112 CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
113 CPUID_PAT);
114 env->pat = 0x0007040600070406ULL;
115 env->cpuid_ext3_features = CPUID_EXT3_SVM;
116 env->cpuid_ext_features = CPUID_EXT_SSE3;
117 env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
118 env->cpuid_features |= CPUID_APIC;
119 env->cpuid_xlevel = 0x8000000e;
121 const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
122 int c, len, i;
123 len = strlen(model_id);
124 for(i = 0; i < 48; i++) {
125 if (i >= len)
126 c = '\0';
127 else
128 c = model_id[i];
129 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
132 #ifdef TARGET_X86_64
133 /* currently not enabled for std i386 because not fully tested */
134 env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
135 env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
137 /* these features are needed for Win64 and aren't fully implemented */
138 env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
139 /* this feature is needed for Solaris and isn't fully implemented */
140 env->cpuid_features |= CPUID_PSE36;
141 #endif
143 cpu_reset(env);
144 #ifdef USE_KQEMU
145 kqemu_init(env);
146 #endif
147 return env;
150 /* NOTE: must be called outside the CPU execute loop */
151 void cpu_reset(CPUX86State *env)
153 int i;
155 memset(env, 0, offsetof(CPUX86State, breakpoints));
157 tlb_flush(env, 1);
159 env->old_exception = -1;
161 /* init to reset state */
163 #ifdef CONFIG_SOFTMMU
164 env->hflags |= HF_SOFTMMU_MASK;
165 #endif
166 env->hflags |= HF_GIF_MASK;
168 cpu_x86_update_cr0(env, 0x60000010);
169 env->a20_mask = 0xffffffff;
170 env->smbase = 0x30000;
172 env->idt.limit = 0xffff;
173 env->gdt.limit = 0xffff;
174 env->ldt.limit = 0xffff;
175 env->ldt.flags = DESC_P_MASK;
176 env->tr.limit = 0xffff;
177 env->tr.flags = DESC_P_MASK;
179 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
180 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
181 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
182 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
183 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
184 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
186 env->eip = 0xfff0;
187 env->regs[R_EDX] = 0x600; /* indicate P6 processor */
189 env->eflags = 0x2;
191 /* FPU init */
192 for(i = 0;i < 8; i++)
193 env->fptags[i] = 1;
194 env->fpuc = 0x37f;
196 env->mxcsr = 0x1f80;
199 void cpu_x86_close(CPUX86State *env)
201 free(env);
204 /***********************************************************/
205 /* x86 debug */
/* Printable names for the lazy condition-code operations, indexed by
   CC_OP_* (must stay in sync with the CC_OP enum order). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
262 void cpu_dump_state(CPUState *env, FILE *f,
263 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
264 int flags)
266 int eflags, i, nb;
267 char cc_op_name[32];
268 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
270 eflags = env->eflags;
271 #ifdef TARGET_X86_64
272 if (env->hflags & HF_CS64_MASK) {
273 cpu_fprintf(f,
274 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
275 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
276 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
277 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
278 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
279 env->regs[R_EAX],
280 env->regs[R_EBX],
281 env->regs[R_ECX],
282 env->regs[R_EDX],
283 env->regs[R_ESI],
284 env->regs[R_EDI],
285 env->regs[R_EBP],
286 env->regs[R_ESP],
287 env->regs[8],
288 env->regs[9],
289 env->regs[10],
290 env->regs[11],
291 env->regs[12],
292 env->regs[13],
293 env->regs[14],
294 env->regs[15],
295 env->eip, eflags,
296 eflags & DF_MASK ? 'D' : '-',
297 eflags & CC_O ? 'O' : '-',
298 eflags & CC_S ? 'S' : '-',
299 eflags & CC_Z ? 'Z' : '-',
300 eflags & CC_A ? 'A' : '-',
301 eflags & CC_P ? 'P' : '-',
302 eflags & CC_C ? 'C' : '-',
303 env->hflags & HF_CPL_MASK,
304 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
305 (env->a20_mask >> 20) & 1,
306 (env->hflags >> HF_SMM_SHIFT) & 1,
307 (env->hflags >> HF_HALTED_SHIFT) & 1);
308 } else
309 #endif
311 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
312 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
313 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
314 (uint32_t)env->regs[R_EAX],
315 (uint32_t)env->regs[R_EBX],
316 (uint32_t)env->regs[R_ECX],
317 (uint32_t)env->regs[R_EDX],
318 (uint32_t)env->regs[R_ESI],
319 (uint32_t)env->regs[R_EDI],
320 (uint32_t)env->regs[R_EBP],
321 (uint32_t)env->regs[R_ESP],
322 (uint32_t)env->eip, eflags,
323 eflags & DF_MASK ? 'D' : '-',
324 eflags & CC_O ? 'O' : '-',
325 eflags & CC_S ? 'S' : '-',
326 eflags & CC_Z ? 'Z' : '-',
327 eflags & CC_A ? 'A' : '-',
328 eflags & CC_P ? 'P' : '-',
329 eflags & CC_C ? 'C' : '-',
330 env->hflags & HF_CPL_MASK,
331 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
332 (env->a20_mask >> 20) & 1,
333 (env->hflags >> HF_SMM_SHIFT) & 1,
334 (env->hflags >> HF_HALTED_SHIFT) & 1);
337 #ifdef TARGET_X86_64
338 if (env->hflags & HF_LMA_MASK) {
339 for(i = 0; i < 6; i++) {
340 SegmentCache *sc = &env->segs[i];
341 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
342 seg_name[i],
343 sc->selector,
344 sc->base,
345 sc->limit,
346 sc->flags);
348 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
349 env->ldt.selector,
350 env->ldt.base,
351 env->ldt.limit,
352 env->ldt.flags);
353 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
354 env->tr.selector,
355 env->tr.base,
356 env->tr.limit,
357 env->tr.flags);
358 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
359 env->gdt.base, env->gdt.limit);
360 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
361 env->idt.base, env->idt.limit);
362 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
363 (uint32_t)env->cr[0],
364 env->cr[2],
365 env->cr[3],
366 (uint32_t)env->cr[4]);
367 } else
368 #endif
370 for(i = 0; i < 6; i++) {
371 SegmentCache *sc = &env->segs[i];
372 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
373 seg_name[i],
374 sc->selector,
375 (uint32_t)sc->base,
376 sc->limit,
377 sc->flags);
379 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
380 env->ldt.selector,
381 (uint32_t)env->ldt.base,
382 env->ldt.limit,
383 env->ldt.flags);
384 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
385 env->tr.selector,
386 (uint32_t)env->tr.base,
387 env->tr.limit,
388 env->tr.flags);
389 cpu_fprintf(f, "GDT= %08x %08x\n",
390 (uint32_t)env->gdt.base, env->gdt.limit);
391 cpu_fprintf(f, "IDT= %08x %08x\n",
392 (uint32_t)env->idt.base, env->idt.limit);
393 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
394 (uint32_t)env->cr[0],
395 (uint32_t)env->cr[2],
396 (uint32_t)env->cr[3],
397 (uint32_t)env->cr[4]);
399 if (flags & X86_DUMP_CCOP) {
400 if ((unsigned)env->cc_op < CC_OP_NB)
401 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
402 else
403 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
404 #ifdef TARGET_X86_64
405 if (env->hflags & HF_CS64_MASK) {
406 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
407 env->cc_src, env->cc_dst,
408 cc_op_name);
409 } else
410 #endif
412 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
413 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
414 cc_op_name);
417 if (flags & X86_DUMP_FPU) {
418 int fptag;
419 fptag = 0;
420 for(i = 0; i < 8; i++) {
421 fptag |= ((!env->fptags[i]) << i);
423 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
424 env->fpuc,
425 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
426 env->fpstt,
427 fptag,
428 env->mxcsr);
429 for(i=0;i<8;i++) {
430 #if defined(USE_X86LDOUBLE)
431 union {
432 long double d;
433 struct {
434 uint64_t lower;
435 uint16_t upper;
436 } l;
437 } tmp;
438 tmp.d = env->fpregs[i].d;
439 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
440 i, tmp.l.lower, tmp.l.upper);
441 #else
442 cpu_fprintf(f, "FPR%d=%016" PRIx64,
443 i, env->fpregs[i].mmx.q);
444 #endif
445 if ((i & 1) == 1)
446 cpu_fprintf(f, "\n");
447 else
448 cpu_fprintf(f, " ");
450 if (env->hflags & HF_CS64_MASK)
451 nb = 16;
452 else
453 nb = 8;
454 for(i=0;i<nb;i++) {
455 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
457 env->xmm_regs[i].XMM_L(3),
458 env->xmm_regs[i].XMM_L(2),
459 env->xmm_regs[i].XMM_L(1),
460 env->xmm_regs[i].XMM_L(0));
461 if ((i & 1) == 1)
462 cpu_fprintf(f, "\n");
463 else
464 cpu_fprintf(f, " ");
469 /***********************************************************/
470 /* x86 mmu */
471 /* XXX: add PGE support */
473 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
475 a20_state = (a20_state != 0);
476 if (a20_state != ((env->a20_mask >> 20) & 1)) {
477 #if defined(DEBUG_MMU)
478 printf("A20 update: a20=%d\n", a20_state);
479 #endif
480 /* if the cpu is currently executing code, we must unlink it and
481 all the potentially executing TB */
482 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
484 /* when a20 is changed, all the MMU mappings are invalid, so
485 we must flush everything */
486 tlb_flush(env, 1);
487 env->a20_mask = 0xffefffff | (a20_state << 20);
491 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
493 int pe_state;
495 #if defined(DEBUG_MMU)
496 printf("CR0 update: CR0=0x%08x\n", new_cr0);
497 #endif
498 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
499 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
500 tlb_flush(env, 1);
503 #ifdef TARGET_X86_64
504 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
505 (env->efer & MSR_EFER_LME)) {
506 /* enter in long mode */
507 /* XXX: generate an exception */
508 if (!(env->cr[4] & CR4_PAE_MASK))
509 return;
510 env->efer |= MSR_EFER_LMA;
511 env->hflags |= HF_LMA_MASK;
512 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
513 (env->efer & MSR_EFER_LMA)) {
514 /* exit long mode */
515 env->efer &= ~MSR_EFER_LMA;
516 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
517 env->eip &= 0xffffffff;
519 #endif
520 env->cr[0] = new_cr0 | CR0_ET_MASK;
522 /* update PE flag in hidden flags */
523 pe_state = (env->cr[0] & CR0_PE_MASK);
524 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
525 /* ensure that ADDSEG is always set in real mode */
526 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
527 /* update FPU flags */
528 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
529 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
532 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
533 the PDPT */
534 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
536 env->cr[3] = new_cr3;
537 if (env->cr[0] & CR0_PG_MASK) {
538 #if defined(DEBUG_MMU)
539 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
540 #endif
541 tlb_flush(env, 0);
545 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
547 #if defined(DEBUG_MMU)
548 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
549 #endif
550 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
551 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
552 tlb_flush(env, 1);
554 /* SSE handling */
555 if (!(env->cpuid_features & CPUID_SSE))
556 new_cr4 &= ~CR4_OSFXSR_MASK;
557 if (new_cr4 & CR4_OSFXSR_MASK)
558 env->hflags |= HF_OSFXSR_MASK;
559 else
560 env->hflags &= ~HF_OSFXSR_MASK;
562 env->cr[4] = new_cr4;
565 /* XXX: also flush 4MB pages */
566 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
568 tlb_flush_page(env, addr);
571 #if defined(CONFIG_USER_ONLY)
573 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
574 int is_write, int mmu_idx, int is_softmmu)
576 /* user mode only emulation */
577 is_write &= 1;
578 env->cr[2] = addr;
579 env->error_code = (is_write << PG_ERROR_W_BIT);
580 env->error_code |= PG_ERROR_U_MASK;
581 env->exception_index = EXCP0E_PAGE;
582 return 1;
585 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
587 return addr;
590 #else
592 #define PHYS_ADDR_MASK 0xfffff000
594 /* return value:
595 -1 = cannot handle fault
596 0 = nothing more to do
597 1 = generate PF fault
598 2 = soft MMU activation required for this block
600 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
601 int is_write1, int mmu_idx, int is_softmmu)
603 uint64_t ptep, pte;
604 uint32_t pdpe_addr, pde_addr, pte_addr;
605 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
606 unsigned long paddr, page_offset;
607 target_ulong vaddr, virt_addr;
609 is_user = mmu_idx == MMU_USER_IDX;
610 #if defined(DEBUG_MMU)
611 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
612 addr, is_write1, is_user, env->eip);
613 #endif
614 is_write = is_write1 & 1;
616 if (!(env->cr[0] & CR0_PG_MASK)) {
617 pte = addr;
618 virt_addr = addr & TARGET_PAGE_MASK;
619 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
620 page_size = 4096;
621 goto do_mapping;
624 if (env->cr[4] & CR4_PAE_MASK) {
625 uint64_t pde, pdpe;
627 /* XXX: we only use 32 bit physical addresses */
628 #ifdef TARGET_X86_64
629 if (env->hflags & HF_LMA_MASK) {
630 uint32_t pml4e_addr;
631 uint64_t pml4e;
632 int32_t sext;
634 /* test virtual address sign extension */
635 sext = (int64_t)addr >> 47;
636 if (sext != 0 && sext != -1) {
637 env->error_code = 0;
638 env->exception_index = EXCP0D_GPF;
639 return 1;
642 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
643 env->a20_mask;
644 pml4e = ldq_phys(pml4e_addr);
645 if (!(pml4e & PG_PRESENT_MASK)) {
646 error_code = 0;
647 goto do_fault;
649 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
650 error_code = PG_ERROR_RSVD_MASK;
651 goto do_fault;
653 if (!(pml4e & PG_ACCESSED_MASK)) {
654 pml4e |= PG_ACCESSED_MASK;
655 stl_phys_notdirty(pml4e_addr, pml4e);
657 ptep = pml4e ^ PG_NX_MASK;
658 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
659 env->a20_mask;
660 pdpe = ldq_phys(pdpe_addr);
661 if (!(pdpe & PG_PRESENT_MASK)) {
662 error_code = 0;
663 goto do_fault;
665 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
666 error_code = PG_ERROR_RSVD_MASK;
667 goto do_fault;
669 ptep &= pdpe ^ PG_NX_MASK;
670 if (!(pdpe & PG_ACCESSED_MASK)) {
671 pdpe |= PG_ACCESSED_MASK;
672 stl_phys_notdirty(pdpe_addr, pdpe);
674 } else
675 #endif
677 /* XXX: load them when cr3 is loaded ? */
678 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
679 env->a20_mask;
680 pdpe = ldq_phys(pdpe_addr);
681 if (!(pdpe & PG_PRESENT_MASK)) {
682 error_code = 0;
683 goto do_fault;
685 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
688 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
689 env->a20_mask;
690 pde = ldq_phys(pde_addr);
691 if (!(pde & PG_PRESENT_MASK)) {
692 error_code = 0;
693 goto do_fault;
695 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
696 error_code = PG_ERROR_RSVD_MASK;
697 goto do_fault;
699 ptep &= pde ^ PG_NX_MASK;
700 if (pde & PG_PSE_MASK) {
701 /* 2 MB page */
702 page_size = 2048 * 1024;
703 ptep ^= PG_NX_MASK;
704 if ((ptep & PG_NX_MASK) && is_write1 == 2)
705 goto do_fault_protect;
706 if (is_user) {
707 if (!(ptep & PG_USER_MASK))
708 goto do_fault_protect;
709 if (is_write && !(ptep & PG_RW_MASK))
710 goto do_fault_protect;
711 } else {
712 if ((env->cr[0] & CR0_WP_MASK) &&
713 is_write && !(ptep & PG_RW_MASK))
714 goto do_fault_protect;
716 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
717 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
718 pde |= PG_ACCESSED_MASK;
719 if (is_dirty)
720 pde |= PG_DIRTY_MASK;
721 stl_phys_notdirty(pde_addr, pde);
723 /* align to page_size */
724 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
725 virt_addr = addr & ~(page_size - 1);
726 } else {
727 /* 4 KB page */
728 if (!(pde & PG_ACCESSED_MASK)) {
729 pde |= PG_ACCESSED_MASK;
730 stl_phys_notdirty(pde_addr, pde);
732 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
733 env->a20_mask;
734 pte = ldq_phys(pte_addr);
735 if (!(pte & PG_PRESENT_MASK)) {
736 error_code = 0;
737 goto do_fault;
739 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
740 error_code = PG_ERROR_RSVD_MASK;
741 goto do_fault;
743 /* combine pde and pte nx, user and rw protections */
744 ptep &= pte ^ PG_NX_MASK;
745 ptep ^= PG_NX_MASK;
746 if ((ptep & PG_NX_MASK) && is_write1 == 2)
747 goto do_fault_protect;
748 if (is_user) {
749 if (!(ptep & PG_USER_MASK))
750 goto do_fault_protect;
751 if (is_write && !(ptep & PG_RW_MASK))
752 goto do_fault_protect;
753 } else {
754 if ((env->cr[0] & CR0_WP_MASK) &&
755 is_write && !(ptep & PG_RW_MASK))
756 goto do_fault_protect;
758 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
759 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
760 pte |= PG_ACCESSED_MASK;
761 if (is_dirty)
762 pte |= PG_DIRTY_MASK;
763 stl_phys_notdirty(pte_addr, pte);
765 page_size = 4096;
766 virt_addr = addr & ~0xfff;
767 pte = pte & (PHYS_ADDR_MASK | 0xfff);
769 } else {
770 uint32_t pde;
772 /* page directory entry */
773 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
774 env->a20_mask;
775 pde = ldl_phys(pde_addr);
776 if (!(pde & PG_PRESENT_MASK)) {
777 error_code = 0;
778 goto do_fault;
780 /* if PSE bit is set, then we use a 4MB page */
781 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
782 page_size = 4096 * 1024;
783 if (is_user) {
784 if (!(pde & PG_USER_MASK))
785 goto do_fault_protect;
786 if (is_write && !(pde & PG_RW_MASK))
787 goto do_fault_protect;
788 } else {
789 if ((env->cr[0] & CR0_WP_MASK) &&
790 is_write && !(pde & PG_RW_MASK))
791 goto do_fault_protect;
793 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
794 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
795 pde |= PG_ACCESSED_MASK;
796 if (is_dirty)
797 pde |= PG_DIRTY_MASK;
798 stl_phys_notdirty(pde_addr, pde);
801 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
802 ptep = pte;
803 virt_addr = addr & ~(page_size - 1);
804 } else {
805 if (!(pde & PG_ACCESSED_MASK)) {
806 pde |= PG_ACCESSED_MASK;
807 stl_phys_notdirty(pde_addr, pde);
810 /* page directory entry */
811 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
812 env->a20_mask;
813 pte = ldl_phys(pte_addr);
814 if (!(pte & PG_PRESENT_MASK)) {
815 error_code = 0;
816 goto do_fault;
818 /* combine pde and pte user and rw protections */
819 ptep = pte & pde;
820 if (is_user) {
821 if (!(ptep & PG_USER_MASK))
822 goto do_fault_protect;
823 if (is_write && !(ptep & PG_RW_MASK))
824 goto do_fault_protect;
825 } else {
826 if ((env->cr[0] & CR0_WP_MASK) &&
827 is_write && !(ptep & PG_RW_MASK))
828 goto do_fault_protect;
830 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
831 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
832 pte |= PG_ACCESSED_MASK;
833 if (is_dirty)
834 pte |= PG_DIRTY_MASK;
835 stl_phys_notdirty(pte_addr, pte);
837 page_size = 4096;
838 virt_addr = addr & ~0xfff;
841 /* the page can be put in the TLB */
842 prot = PAGE_READ;
843 if (!(ptep & PG_NX_MASK))
844 prot |= PAGE_EXEC;
845 if (pte & PG_DIRTY_MASK) {
846 /* only set write access if already dirty... otherwise wait
847 for dirty access */
848 if (is_user) {
849 if (ptep & PG_RW_MASK)
850 prot |= PAGE_WRITE;
851 } else {
852 if (!(env->cr[0] & CR0_WP_MASK) ||
853 (ptep & PG_RW_MASK))
854 prot |= PAGE_WRITE;
857 do_mapping:
858 pte = pte & env->a20_mask;
860 /* Even if 4MB pages, we map only one 4KB page in the cache to
861 avoid filling it too fast */
862 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
863 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
864 vaddr = virt_addr + page_offset;
866 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
867 return ret;
868 do_fault_protect:
869 error_code = PG_ERROR_P_MASK;
870 do_fault:
871 error_code |= (is_write << PG_ERROR_W_BIT);
872 if (is_user)
873 error_code |= PG_ERROR_U_MASK;
874 if (is_write1 == 2 &&
875 (env->efer & MSR_EFER_NXE) &&
876 (env->cr[4] & CR4_PAE_MASK))
877 error_code |= PG_ERROR_I_D_MASK;
878 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
879 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
880 } else {
881 env->cr[2] = addr;
883 env->error_code = error_code;
884 env->exception_index = EXCP0E_PAGE;
885 /* the VMM will handle this */
886 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
887 return 2;
888 return 1;
891 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
893 uint32_t pde_addr, pte_addr;
894 uint32_t pde, pte, paddr, page_offset, page_size;
896 if (env->cr[4] & CR4_PAE_MASK) {
897 uint32_t pdpe_addr, pde_addr, pte_addr;
898 uint32_t pdpe;
900 /* XXX: we only use 32 bit physical addresses */
901 #ifdef TARGET_X86_64
902 if (env->hflags & HF_LMA_MASK) {
903 uint32_t pml4e_addr, pml4e;
904 int32_t sext;
906 /* test virtual address sign extension */
907 sext = (int64_t)addr >> 47;
908 if (sext != 0 && sext != -1)
909 return -1;
911 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
912 env->a20_mask;
913 pml4e = ldl_phys(pml4e_addr);
914 if (!(pml4e & PG_PRESENT_MASK))
915 return -1;
917 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
918 env->a20_mask;
919 pdpe = ldl_phys(pdpe_addr);
920 if (!(pdpe & PG_PRESENT_MASK))
921 return -1;
922 } else
923 #endif
925 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
926 env->a20_mask;
927 pdpe = ldl_phys(pdpe_addr);
928 if (!(pdpe & PG_PRESENT_MASK))
929 return -1;
932 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
933 env->a20_mask;
934 pde = ldl_phys(pde_addr);
935 if (!(pde & PG_PRESENT_MASK)) {
936 return -1;
938 if (pde & PG_PSE_MASK) {
939 /* 2 MB page */
940 page_size = 2048 * 1024;
941 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
942 } else {
943 /* 4 KB page */
944 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
945 env->a20_mask;
946 page_size = 4096;
947 pte = ldl_phys(pte_addr);
949 } else {
950 if (!(env->cr[0] & CR0_PG_MASK)) {
951 pte = addr;
952 page_size = 4096;
953 } else {
954 /* page directory entry */
955 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
956 pde = ldl_phys(pde_addr);
957 if (!(pde & PG_PRESENT_MASK))
958 return -1;
959 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
960 pte = pde & ~0x003ff000; /* align to 4MB */
961 page_size = 4096 * 1024;
962 } else {
963 /* page directory entry */
964 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
965 pte = ldl_phys(pte_addr);
966 if (!(pte & PG_PRESENT_MASK))
967 return -1;
968 page_size = 4096;
971 pte = pte & env->a20_mask;
974 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
975 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
976 return paddr;
978 #endif /* !CONFIG_USER_ONLY */
980 #if defined(USE_CODE_COPY)
/* In-memory layout written/read by the x87 fsave/frstor instructions
   (legacy 32-bit protected-mode format): control/status/tag words with
   16-bit padding, instruction/operand pointers, then the eight 80-bit
   stack registers. */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
996 void restore_native_fp_state(CPUState *env)
998 int fptag, i, j;
999 struct fpstate fp1, *fp = &fp1;
1001 fp->fpuc = env->fpuc;
1002 fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
1003 fptag = 0;
1004 for (i=7; i>=0; i--) {
1005 fptag <<= 2;
1006 if (env->fptags[i]) {
1007 fptag |= 3;
1008 } else {
1009 /* the FPU automatically computes it */
1012 fp->fptag = fptag;
1013 j = env->fpstt;
1014 for(i = 0;i < 8; i++) {
1015 memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
1016 j = (j + 1) & 7;
1018 asm volatile ("frstor %0" : "=m" (*fp));
1019 env->native_fp_regs = 1;
1022 void save_native_fp_state(CPUState *env)
1024 int fptag, i, j;
1025 uint16_t fpuc;
1026 struct fpstate fp1, *fp = &fp1;
1028 asm volatile ("fsave %0" : : "m" (*fp));
1029 env->fpuc = fp->fpuc;
1030 env->fpstt = (fp->fpus >> 11) & 7;
1031 env->fpus = fp->fpus & ~0x3800;
1032 fptag = fp->fptag;
1033 for(i = 0;i < 8; i++) {
1034 env->fptags[i] = ((fptag & 3) == 3);
1035 fptag >>= 2;
1037 j = env->fpstt;
1038 for(i = 0;i < 8; i++) {
1039 memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
1040 j = (j + 1) & 7;
1042 /* we must restore the default rounding state */
1043 /* XXX: we do not restore the exception state */
1044 fpuc = 0x037f | (env->fpuc & (3 << 10));
1045 asm volatile("fldcw %0" : : "m" (fpuc));
1046 env->native_fp_regs = 0;
1048 #endif