/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
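        /* the selector just loaded is (1 << 3) | 7 = 0x0f: descriptor
           index 1 (the LDT entry written above), TI=1 (bit 2, LDT rather
           than GDT), RPL=3 */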
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
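        /* CPUID leaf 1 EAX layout: stepping in bits 3-0, model in bits
           7-4, family in bits 11-8; e.g. family 6 / model 3 / stepping 3
           reads back as 0x633 */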
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
        env->cpuid_ext_features = 0;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_xlevel = 0;
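        /* the model string is reported by CPUID leaves 0x80000002-4,
           four ASCII bytes per 32-bit register in little-endian order;
           pack it into cpuid_model[]: i >> 2 selects the word,
           8 * (i & 3) the byte lane */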
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;

            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
        env->cpuid_xlevel = 0x80000008;
#endif
    }
    cpu_single_env = env;
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
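
    /* CS base 0xffff0000 + EIP 0xfff0 makes the first instruction fetch
       hit physical address 0xfffffff0, the x86 reset vector */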
    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;
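
    /* 0x1f80 = all SIMD exception mask bits set (bits 7-12) and
       round-to-nearest: the architectural power-on value of MXCSR */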
    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
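    /* CR0.MP/EM/TS occupy CR0 bits 1-3 and HF_MP/HF_EM/HF_TS are three
       consecutive hflags bits, so a single shift by HF_MP_SHIFT - 1
       moves all three flags at once */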
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: the physical address equals the virtual one */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
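            /* long mode requires canonical addresses: bits 63-47 must
               all equal bit 47, i.e. the 17-bit arithmetic shift result
               must be 0 or -1 */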
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }
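
            /* 4-level walk: bits 47-39 index the PML4, bits 38-30 the
               PDPT, bits 29-21 the PD and bits 20-12 the PT; entries
               are 8 bytes wide, hence the << 3 */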
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
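        /* two-level legacy walk: bits 31-22 index the page directory,
           bits 21-12 the page table, with 4-byte entries;
           (addr >> 20) & ~3 is equivalent to ((addr >> 22) << 2) */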
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even with 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
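
/* this layout mirrors the 108-byte protected-mode fsave/frstor image:
   a 28-byte header (control, status and tag words each padded to 32
   bits, then fpip/fpcs/fpoo/fpos) followed by eight 10-byte ST registers */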

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
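    /* bits 13-11 of the FPU status word hold TOP, the stack-top index,
       which QEMU tracks separately in fpstt */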
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the image, so it is an input ("m"), not an output */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the image, so it must be an output ("=m") */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
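    /* the value just built masks all six FPU exceptions (0x3f), selects
       extended precision (0x300) and keeps only the guest's
       rounding-control bits (3 << 10) */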
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif