/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

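        /* the selector loaded below is (1 << 3) | 7 = 0x0f: descriptor
           index 1, TI = 1 (LDT) and RPL = 3, i.e. the LDT entry written
           above, so %fs-relative accesses reach this CPUState */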
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
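        /* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX, four
           little-endian characters per register, which is why e.g.
           0x756e6547 reads "Genu" with 'G' in the low byte; leaf 1
           returns the version in EAX as family[11:8], model[7:4] and
           stepping[3:0], matching the packing below */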
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
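        /* CPUID leaves 0x80000002..0x80000004 return a 48-byte model
           name string, four bytes per 32-bit register; the loop below
           packs character i into word i >> 2 at byte position i & 3
           (little-endian) and NUL-pads past the end of the string */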
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
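    /* after reset the CPU is in real mode at CS:IP = F000:FFF0 with the
       hidden CS base preset to 0xffff0000, so the first instruction is
       fetched from 0xfffffff0, just below the top of the 4GB space */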
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
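    /* a tag value of 1 marks an x87 register as empty; 0x37f is the
       power-on control word (all exceptions masked, extended precision,
       round to nearest) and 0x1f80 the MXCSR default with all SSE
       exceptions masked */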
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */
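/* QEMU evaluates EFLAGS lazily: instead of updating the flags after
   every instruction it records the last operation in cc_op and its
   operands in cc_src/cc_dst, and only materializes the flags when they
   are actually read.  The names below must stay in the same order as
   the CC_OP_xxx enum. */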
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }

    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
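/* a20_mask is ANDed into every physical address computed below: with
   the A20 gate disabled, address bit 20 is forced to zero so that
   accesses wrap at 1MB as they did on the 8086 */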
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
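    /* CR0.MP, CR0.EM and CR0.TS sit in consecutive bits 1..3, and the
       HF_MP/HF_EM/HF_TS hflags are consecutive as well, so one shift of
       new_cr0 copies all three at once */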
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

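/* in user mode emulation there is no guest MMU: guest memory is mapped
   directly by the host kernel, so any fault that reaches this point is
   simply converted into a guest #PF with a user-level error code */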
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
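/* The handler below walks the guest page tables by hand: in PAE/long
   mode the entries are 64 bits wide (ldq_phys) and the chain is
   PML4E -> PDPE -> PDE -> PTE, in legacy mode they are 32 bits
   (ldl_phys) with only PDE -> PTE.  ptep accumulates the combined
   user/rw (and NX) permissions of all levels, and accessed/dirty bits
   are set on the way down, as the hardware would do. */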
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
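            /* 32-bit PAE: CR3 points at the 32-byte aligned 4-entry
               page directory pointer table; address bits 31..30 select
               the entry, hence the ((addr >> 27) & 0x18) byte offset */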
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
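    /* the #PF error code is assembled below: bit 0 (P) means the fault
       was a protection violation on a present page, bit 1 (W) a write,
       bit 2 (U) a user-mode access and bit 4 (I/D) an instruction fetch
       with NX paging active (is_write1 == 2 encodes a code fetch) */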
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

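/* debug-only translation (used e.g. by the gdb stub): walk the page
   tables without setting accessed/dirty bits and without raising a
   fault; returns -1 if the address is not mapped */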
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
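/* code copy mode runs guest FPU code on the host FPU, so the guest FPU
   state has to be moved between env and the real registers.  struct
   fpstate mirrors the 108-byte 32-bit fsave/frstor memory image:
   control, status and tag words padded to 32 bits, the instruction and
   operand pointers, then eight 10-byte data registers. */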
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
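    /* the hardware tag word keeps two bits per register (11 = empty);
       env->fptags[] only records empty/non-empty, so non-empty
       registers are tagged 00 (valid) and the FPU is left to work out
       the precise tag itself */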
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif /* USE_CODE_COPY */