/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
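/* note: since Linux 2.5.66 the LDT descriptor structure in <asm/ldt.h> is
   named struct user_desc; the define above keeps the older
   modify_ldt_ldt_s name working against newer kernel headers */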
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;

    env = malloc(sizeof(CPUX86State));
    memset(env, 0, sizeof(CPUX86State));

    /* init various static tables */
    optimize_flags_init();
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
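        /* the limit is in 4KB units (limit_in_pages = 1), so the expression
           above rounds sizeof(CPUState) up to whole pages; the selector
           loaded below is (1 << 3) | 7: descriptor index 1, TI bit set
           (LDT rather than GDT), RPL 3 */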
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
        env->cpuid_ext_features = 0;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE |
                               CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;

            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
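                /* worked example of the packing below: CPUID leaves
                   0x80000002..0x80000004 return the model string as twelve
                   little-endian 32-bit words, so byte i lands in word i >> 2
                   at bit 8 * (i & 3); for "QEMU..." word 0 becomes
                   'Q' | 'E' << 8 | 'M' << 16 | 'U' << 24 == 0x554d4551 */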
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
        env->cpuid_xlevel = 0x80000008;
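        /* 0x80000008 is the highest implemented extended CPUID leaf; leaf
           0x80000008 itself reports the physical and virtual address widths */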
    }

    cpu_single_env = env;

    cpu_reset(env);
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    memset(env, 0, offsetof(CPUX86State, breakpoints));
    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/

static const char *cc_op_str[] = {
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX], env->regs[R_EBX],
                    env->regs[R_ECX], env->regs[R_EDX],
                    env->regs[R_ESI], env->regs[R_EDI],
                    env->regs[R_EBP], env->regs[R_ESP],
                    env->regs[8], env->regs[9],
                    env->regs[10], env->regs[11],
                    env->regs[12], env->regs[13],
                    env->regs[14], env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}
/***********************************************************/
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
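        /* with A20 disabled, bit 20 of the mask is clear: e.g. physical
           address 0x00100000 is masked to 0, reproducing the 8086-style
           1MB wrap-around; with A20 enabled the mask is all ones */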
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
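    /* CR0.MP, CR0.EM and CR0.TS sit in bits 1..3 of CR0, and the
       corresponding hidden flags occupy three consecutive bits starting at
       HF_MP_SHIFT, so one shift by (HF_MP_SHIFT - 1) moves all three CR0
       bits into place before masking */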
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;
            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
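            /* in a canonical 48-bit virtual address, bits 63..47 all equal
               bit 47, so the arithmetic shift yields 0 or -1 exactly for
               valid addresses */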
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
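        /* (addr >> 20) & ~3 equals ((addr >> 22) << 2): the 10-bit page
           directory index scaled to a 4-byte entry offset; e.g. for
           addr = 0x00c01000 it selects PDE 3 at byte offset 12 */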
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;
    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;
    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;
= ((env
->cr
[3] & ~0xfff) + (((addr
>> 39) & 0x1ff) << 3)) &
767 pml4e
= ldl_phys(pml4e_addr
);
768 if (!(pml4e
& PG_PRESENT_MASK
))
771 pdpe_addr
= ((pml4e
& ~0xfff) + (((addr
>> 30) & 0x1ff) << 3)) &
773 pdpe
= ldl_phys(pdpe_addr
);
774 if (!(pdpe
& PG_PRESENT_MASK
))
779 pdpe_addr
= ((env
->cr
[3] & ~0x1f) + ((addr
>> 30) << 3)) &
781 pdpe
= ldl_phys(pdpe_addr
);
782 if (!(pdpe
& PG_PRESENT_MASK
))
786 pde_addr
= ((pdpe
& ~0xfff) + (((addr
>> 21) & 0x1ff) << 3)) &
788 pde
= ldl_phys(pde_addr
);
789 if (!(pde
& PG_PRESENT_MASK
)) {
792 if (pde
& PG_PSE_MASK
) {
794 page_size
= 2048 * 1024;
795 pte
= pde
& ~( (page_size
- 1) & ~0xfff); /* align to page_size */
798 pte_addr
= ((pde
& ~0xfff) + (((addr
>> 12) & 0x1ff) << 3)) &
801 pte
= ldl_phys(pte_addr
);
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)

struct fpstate {
    uint16_t fpuc, dummy1;
    uint16_t fpus, dummy2;
    uint16_t fptag, dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;
    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : : "m" (*fp)); /* frstor reads *fp */
    env->native_fp_regs = 1;
}
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : "=m" (*fp)); /* fsave writes *fp */
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
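    /* fsave stores a 2-bit tag per x87 register: 00 valid, 01 zero,
       10 special, 11 empty; only "empty" (3) needs to be tracked here */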
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    /* 0x037f is the x87 default control word (all exceptions masked,
       64-bit precision); bits 10-11 preserve the current rounding mode */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}