target-i386/helper2.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU
#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
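
        /* Note: selector (1 << 3) | 7 decodes as index 1, TI=1 (LDT),
           RPL=3, i.e. the entry installed just above; loading it into
           %fs lets the copied code reach the CPU state through the fs
           segment. */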
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
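        /* CPUID leaf 1 returns this value in EAX: family in bits 11..8,
           model in bits 7..4, stepping in bits 3..0 */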
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
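            /* CPUID leaves 0x80000002..0x80000004 return the model string
               in EAX..EDX; the loop below packs four characters into each
               32-bit cpuid_model word, least-significant byte first,
               NUL-padded to 48 bytes. */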
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
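
    /* after reset, CS:IP = F000:FFF0 with a CS base of 0xffff0000, so
       the first instruction is fetched from the architectural reset
       vector 0xfffffff0 */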
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
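
        /* the A20 gate masks physical address bit 20: 0xffefffff forces
           it to zero when the gate is disabled, emulating the 8086 1MB
           address wrap-around */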
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
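    /* CR0.MP/EM/TS occupy bits 1..3 and HF_MP/HF_EM/HF_TS are consecutive
       hflags bits, so the single shift below copies all three at once */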
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
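
    /* Three walks follow: paging disabled (identity mapping), PAE
       paging (3 levels of 64-bit entries, or 4 in long mode), and
       legacy 2-level 32-bit paging.  is_write1 == 2 denotes an
       instruction fetch and is only used for the NX checks. */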
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
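
            /* NX is inverted before being accumulated with AND, so
               execute permission survives only if no level sets NX;
               the later ptep ^= PG_NX_MASK flips it back */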
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
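
    /* Assemble the architectural page-fault error code: P (bit 0) for
       protection faults vs. not-present, W (bit 1) for writes, U (bit 2)
       for user-mode accesses, and I/D (bit 4) for instruction fetches
       when NX paging is active. */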
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;
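
    /* same walk as cpu_x86_handle_mmu_fault, but read-only: no fault is
       raised and no accessed/dirty bits are set, which makes it suitable
       for the monitor/debugger */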
    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
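/* Memory image used by the fsave/frstor instructions in 32-bit
   protected mode: a 28-byte environment (control/status/tag words and
   instruction/operand pointers) followed by the eight 80-bit stack
   registers. */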
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif