/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "../qemu-kvm.h"

//#define DEBUG_MMU
#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
extern const char *cpu_vendor_string;
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */
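        /* The selector loaded below is (1 << 3) | 7: descriptor index 1,
           TI bit set (LDT rather than GDT) and RPL 3, so %fs ends up
           addressing the env structure through the LDT entry just written. */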
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;

#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
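        /* CPUID leaf 0 returns the vendor string in EBX, EDX, ECX; the
           constants above are the little-endian packing of those 12 bytes,
           e.g. 0x756e6547 is 'u','n','e','G' from high to low byte = "Genu". */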
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
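        /* This packs family/model/stepping into bits 11-8/7-4/3-0 of
           CPUID leaf 1 EAX: family 6, model 3, stepping 3 yields 0x633. */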
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext3_features = CPUID_EXT3_SVM;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE |
                               CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0x8000000e;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;

            if (cpu_vendor_string != NULL)
                model_id = cpu_vendor_string;

            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
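                /* The 48-byte model string is packed little-endian into
                   twelve 32-bit words: byte i lands in word i >> 2 at bit
                   position 8 * (i & 3), so 'Q' is the low byte of word 0. */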
            }
        }

#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL |
                                    CPUID_EXT2_NX;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);

#ifdef USE_KVM
    {
        extern int kvm_allowed;

        if (kvm_allowed) {
            kvm_qemu_init_env(env);
            env->ready_for_interrupt_injection = 1;
        }
    }
#endif
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;
    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;
    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
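    /* System descriptor types: 2 is an LDT, 11 a busy 32-bit TSS; these are
       the architected values expected in ldtr/tr after reset. */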
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB", "MULW", "MULL", "MULQ",
    "ADDB", "ADDW", "ADDL", "ADDQ",
    "ADCB", "ADCW", "ADCL", "ADCQ",
    "SUBB", "SUBW", "SUBL", "SUBQ",
    "SBBB", "SBBW", "SBBL", "SBBQ",
    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",
    "INCB", "INCW", "INCL", "INCQ",
    "DECB", "DECW", "DECL", "DECQ",
    "SHLB", "SHLW", "SHLL", "SHLQ",
    "SARB", "SARW", "SARL", "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */

/* XXX: add PGE support */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
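        /* With A20 disabled, bit 20 is cleared in the mask, so a physical
           address like 0x100000 aliases to 0x000000, reproducing the
           8086-style 1MB wrap-around. */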
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
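    /* CR0.MP, CR0.EM and CR0.TS live in CR0 bits 1-3; shifting new_cr0 left
       by HF_MP_SHIFT - 1 lines them up with HF_MP/HF_EM/HF_TS in hflags, so
       all three flags are copied with a single shift-and-mask below. */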
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }
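            /* A long-mode virtual address is valid only if canonical: bits
               63-47 must all equal bit 47, i.e. the arithmetic shift above
               yields 0 or -1.  The 48-bit address is then split into four
               9-bit table indexes plus a 12-bit page offset
               (bits 47-39 PML4, 38-30 PDPT, 29-21 PD, 20-12 PT). */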
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;
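        /* Legacy 2-level format: bits 31-22 index the page directory,
           bits 21-12 the page table, bits 11-0 are the page offset.
           ((addr >> 20) & 0xffc) is the directory index already scaled
           by the 4-byte entry size. */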
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;
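    /* Only the 4KB page actually touched is entered into the TLB: e.g. a
       fault at 0x456789 inside a 4MB page at 0x400000 gives page_offset =
       0x56000, so just the 4KB page at vaddr = 0x456000 is mapped. */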
    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
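    /* The x87 status word keeps the top-of-stack index in bits 13-11;
       env->fpstt is merged back there, e.g. fpstt == 5 sets 5 << 11 = 0x2800. */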
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
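    /* The x87 tag word holds 2 bits per register (00 valid, 01 zero,
       10 special, 11 empty); QEMU only tracks empty vs. in-use, so a
       pair equal to 3 marks the register as empty. */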
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif /* USE_CODE_COPY */