verr and verw eflags opt fix
[qemu/qemu_0_9_1_stable.git] / target-i386 / helper2.c
blob 730af1b52cd32c4b966649492e6715e6d8bbdce4
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU
#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
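        /* this packing follows the CPUID leaf 1 EAX layout: bits 3..0
           hold the stepping, bits 7..4 the model, bits 11..8 the family */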
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
        env->cpuid_ext_features = 0;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC;
#endif
    }
    cpu_single_env = env;
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
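    /* with a CS base of 0xffff0000 and EIP 0xfff0, the first instruction
       is fetched from the architectural reset vector 0xfffffff0 */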
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */
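/* QEMU computes EFLAGS lazily: cc_op records which operation last set the
   condition codes and cc_src/cc_dst hold its operands, so the flags are
   only materialized when actually read.  The names below are used by
   cpu_dump_state() to print the pending cc_op. */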
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
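        /* bit 20 of the mask mirrors the gate state: with A20 disabled it
           is cleared from every physical address, reproducing the 1MB
           wrap-around of the original PC address bus */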
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
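    /* CR0.MP, CR0.EM and CR0.TS occupy consecutive bits 1..3, so the
       single shift below copies all three into hflags at once; this
       relies on HF_MP/HF_EM/HF_TS being laid out consecutively as well */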
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
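    /* a CPU without SSE cannot have CR4.OSFXSR set; the HF_OSFXSR hflag
       is presumably consulted by the translator so that SSE instructions
       fault when the OS has not enabled FXSAVE/FXRSTOR support */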
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
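    /* three page-table formats are handled below: the 4-level long-mode
       walk (PML4E/PDPE/PDE/PTE), the 3-level legacy PAE walk, and the
       classic 2-level 32-bit walk */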
    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
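/* debug-only software page walk: unlike the fault handler above, it does
   not set accessed/dirty bits and reports failure by returning -1 instead
   of raising a page fault */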
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)
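/* this matches the 108-byte memory image written by FSAVE in 32-bit
   protected mode: control/status/tag words (each padded to 32 bits),
   instruction and operand pointers, then the eight 10-byte registers */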
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
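    /* bits 11..13 of the status word hold TOP, the stack-top index; QEMU
       keeps it separately in fpstt, so it is merged back in here */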
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
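    /* 0x037f masks all exceptions and selects 64-bit (extended) precision;
       only the rounding-control field, bits 10..11, is kept from the
       guest's control word */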
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif