/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
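
        /* load %fs with selector (1 << 3) | 7: LDT entry 1 (TI=1,
           RPL=3), so the generated code can address the CPU state
           through %fs */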
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
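        /* CPUID.1 EAX: stepping in bits 3..0, model in bits 7..4,
           family in bits 11..8 */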
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
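        /* architectural power-on default for the PAT MSR:
           WB, WT, UC-, UC in both 32-bit halves */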
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
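        /* pack the model string into the 12 32-bit words returned by
           CPUID 0x80000002..0x80000004 (4 ASCII chars per word, NUL
           padded to 48 bytes) */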
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));
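    /* zero only the fields up to 'breakpoints': everything from there
       on is state that must survive a reset */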

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
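    /* tag value 1 marks a register as empty; 0x37f is the control word
       set by FNINIT (all exceptions masked), 0x1f80 the architectural
       MXCSR reset value */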
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */
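
/* string names for env->cc_op; the order must match the CC_OP_* enum
   in cpu.h */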
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
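        /* env->fptags[] stores 1 for an empty register; fold it into an
           FXSAVE-style abridged tag word, one bit per register,
           1 = valid */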
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016llx %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016llx",
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
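
/* env->a20_mask is ANDed into every physical address computed by the
   MMU code below; clearing bit 20 emulates the legacy A20 gate */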
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
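    /* CR0.MP/EM/TS are bits 1..3, so shifting new_cr0 left by
       HF_MP_SHIFT - 1 lands them on the consecutive HF_MP/HF_EM/HF_TS
       hflags bits */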
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
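            /* ptep accumulates the effective protection across the
               page table levels; NX is kept inverted (hence the
               '^ PG_NX_MASK') so a plain AND combines it like the
               USER and RW bits */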
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;
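
    /* debug-only translation (used e.g. by the gdb stub): walk the
       page tables without permission checks and without updating the
       accessed/dirty bits */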
    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
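
/* the struct above mirrors the 32-bit protected mode memory image used
   by fsave/frstor: control, status and tag words each padded to 32
   bits, the instruction/operand pointers, then the 8 x 10-byte
   registers */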

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
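    /* env->fpregs[] is indexed by physical register while the fsave
       image is ordered from the stack top, so rotate by fpstt */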
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif