/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

/* project-internal definitions (CPUX86State, the PG_*/CR*_ masks and
   the ld/st raw access helpers used below) */
#include "cpu.h"
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));

    /* flags setup: we activate the IRQs by default as in user mode */
    env->eflags = 0x2 | IF_MASK;

    env->hflags |= HF_SOFTMMU_MASK;

    /* init various static tables */
    optimize_flags_init();

    return env;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
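/* Typical lifetime of a CPU instance (illustrative usage, not taken
 * from this file):
 *
 *     CPUX86State *env = cpu_x86_init();
 *     cpu_x86_init_mmu(env);
 *     ... execute guest code ...
 *     cpu_x86_close(env);
 */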
/***********************************************************/

static const char *cc_op_str[] = {
    /* ... one name per CC_OP_* enum value ... */
};
void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
            "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
            "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d\n",
            env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
            env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
            env->eip, eflags,
            eflags & DF_MASK ? 'D' : '-',
            eflags & CC_O ? 'O' : '-',
            eflags & CC_S ? 'S' : '-',
            eflags & CC_Z ? 'Z' : '-',
            eflags & CC_A ? 'A' : '-',
            eflags & CC_P ? 'P' : '-',
            eflags & CC_C ? 'C' : '-',
            env->hflags & HF_CPL_MASK,
            (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1);
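    /* Example of the dump produced above (illustrative values):
     *   EAX=00000000 EBX=00000000 ECX=00000000 EDX=00000000
     *   ESI=00000000 EDI=00000000 EBP=00000000 ESP=00000000
     *   EIP=0000fff0 EFL=00000002 [-------] CPL=0 II=0
     * The bracketed field shows one letter per set flag
     * (D,O,S,Z,A,P,C); '-' means the flag is clear. */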
    for(i = 0; i < 6; i++) {
        SegmentCache *sc = &env->segs[i];
        fprintf(f, "%s =%04x %08x %08x %08x\n",
                seg_name[i], sc->selector, (int)sc->base,
                sc->limit, sc->flags);
    }
    fprintf(f, "LDT=%04x %08x %08x %08x\n",
            env->ldt.selector, (int)env->ldt.base,
            env->ldt.limit, env->ldt.flags);
    fprintf(f, "TR =%04x %08x %08x %08x\n",
            env->tr.selector, (int)env->tr.base,
            env->tr.limit, env->tr.flags);
    fprintf(f, "GDT=     %08x %08x\n",
            (int)env->gdt.base, env->gdt.limit);
    fprintf(f, "IDT=     %08x %08x\n",
            (int)env->idt.base, env->idt.limit);
    fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
            env->cr[0], env->cr[2], env->cr[3], env->cr[4]);
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            strcpy(cc_op_name, cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                env->cc_src, env->cc_dst, cc_op_name);
    }
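    /* Background: QEMU evaluates EFLAGS lazily. Instead of computing
     * the arithmetic flags after every instruction, it records the
     * last flag-setting operation (cc_op) together with its operands
     * (cc_src, cc_dst) and derives the flags only when they are
     * actually read. The dump above exposes that hidden state. */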
    if (flags & X86_DUMP_FPU) {
        fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                (double)env->fpregs[0],
                (double)env->fpregs[1],
                (double)env->fpregs[2],
                (double)env->fpregs[3]);
        fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                (double)env->fpregs[4],
                (double)env->fpregs[5],
                (double)env->fpregs[6],
                (double)env->fpregs[7]);
    }
}
/***********************************************************/

/* XXX: add PGE support */

/* called when cr3 or the PG bit are modified */
static int last_pg_state = -1;
static int last_pe_state = 0;
static uint32_t a20_mask;
int a20_enabled;

uint8_t *phys_ram_base;
int phys_ram_fd; /* backs the guest RAM mapping used below */
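/* a20_mask is ANDed into every physical address computed by the MMU
 * code below. With the A20 gate disabled the mask is 0xffefffff,
 * which forces address bit 20 to zero: 0x00100000 then aliases to
 * 0x00000000, reproducing the 8086-style 1MB wrap-around that some
 * real-mode code depends on. */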
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != a20_enabled) {
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, 0);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env);

        a20_enabled = a20_state;
        if (a20_enabled)
            a20_mask = 0xffffffff;
        else
            a20_mask = 0xffefffff;
    }
}
void cpu_x86_update_cr0(CPUX86State *env)
{
    int pg_state, pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", env->cr[0]);
#endif
    pg_state = env->cr[0] & CR0_PG_MASK;
    if (pg_state != last_pg_state) {
        /* paging was toggled: all cached translations are stale */
        tlb_flush(env);
        last_pg_state = pg_state;
    }
    pe_state = env->cr[0] & CR0_PE_MASK;
    if (last_pe_state != pe_state) {
        /* real <-> protected mode switch: generated code depends on
           the mode, so the translation blocks must be flushed */
        tb_flush(env);
        last_pe_state = pe_state;
    }
}
void cpu_x86_update_cr3(CPUX86State *env)
{
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=%08x\n", env->cr[3]);
#endif
        /* new page table base: all cached translations are stale */
        tlb_flush(env);
    }
}
void cpu_x86_init_mmu(CPUX86State *env)
{
    a20_enabled = 1;
    a20_mask = 0xffffffff;

    cpu_x86_update_cr0(env);
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    int flags;
    unsigned long virt_addr;

    tlb_flush_page(env, addr);

    flags = page_get_flags(addr);
    if (flags & PAGE_VALID) {
        virt_addr = addr & ~0xfff;
#if !defined(CONFIG_SOFTMMU)
        munmap((void *)virt_addr, 4096);
#endif
        page_set_flags(virt_addr, virt_addr + 4096, 0);
    }
}
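/* Note: without the soft MMU, guest pages are mmap'ed straight into
 * the host address space, so invalidating a guest page means removing
 * the host mapping (munmap above) as well as clearing the per-page
 * flags, not just dropping a TLB entry. */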
/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr;
    int error_code, is_dirty, prot, page_size, ret;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif
    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: the physical address is the virtual address */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PROT_READ | PROT_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
    /* page directory entry */
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & a20_mask);
    pde = ldl_raw(pde_ptr);
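    /* PDE address math: the directory index is addr >> 22 and each
     * entry is 4 bytes, so the byte offset is (addr >> 22) * 4, which
     * is exactly (addr >> 20) & ~3. For example, addr = 0xc0101234
     * gives index 0x300 and byte offset 0xc00. */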
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    if (is_user) {
        if (!(pde & PG_USER_MASK))
            goto do_fault_protect;
        if (is_write && !(pde & PG_RW_MASK))
            goto do_fault_protect;
    } else {
        if ((env->cr[0] & CR0_WP_MASK) && (pde & PG_USER_MASK) &&
            is_write && !(pde & PG_RW_MASK))
            goto do_fault_protect;
    }
    /* if the PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
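        /* For a 4MB page the PDE itself provides the frame: bits
         * 31..22 hold the physical base. Masking with ~0x003ff000
         * (i.e. 0xffc00fff) clears bits 21..12, keeping that base
         * plus the low flag bits, which later masks strip off. */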
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry */
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask);
        pte = ldl_raw(pte_ptr);
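        /* PTE address math: the table index is (addr >> 12) & 0x3ff
         * and each entry is 4 bytes, giving (addr >> 10) & 0xffc.
         * For example, addr = 0xc0101234 gives index 0x101 and byte
         * offset 0x404. */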
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (is_user) {
            if (!(pte & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pte & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) && (pte & PG_USER_MASK) &&
                is_write && !(pte & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }
    /* the page can be put in the TLB */
    prot = PROT_READ;
    if (is_user) {
        if (pte & PG_RW_MASK)
            prot |= PROT_WRITE;
    } else {
        /* supervisor code may write unless CR0.WP protects a
           read-only user page */
        if (!(env->cr[0] & CR0_WP_MASK) || !(pte & PG_USER_MASK) ||
            (pte & PG_RW_MASK))
            prot |= PROT_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        unsigned long paddr, vaddr, address, addend, page_offset;
        int index, pd;

        /* software MMU case. Even with 4MB pages, we map only one 4KB
           page in the cache to avoid filling it too fast */
        page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
        paddr = (pte & TARGET_PAGE_MASK) + page_offset;
        vaddr = virt_addr + page_offset;
        index = (addr >> 12) & (CPU_TLB_SIZE - 1);
        pd = physpage_find(paddr);
        if (pd & 0xfff) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + pd;
        }
        addend -= vaddr;
        env->tlb_read[is_user][index].address = address;
        env->tlb_read[is_user][index].addend = addend;
        if (prot & PROT_WRITE) {
            env->tlb_write[is_user][index].address = address;
            env->tlb_write[is_user][index].addend = addend;
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
        page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE,
                       PAGE_VALID | PAGE_EXEC | prot);
        ret = 0;
    }
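    /* Each TLB entry pairs the guest virtual page with an addend
     * chosen so that host_address = guest_address + addend; a TLB
     * hit in generated code then costs one compare and one add.
     * Read-only pages get address = -1 in the write TLB so that
     * stores always miss and take the slow path. */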
#if !defined(CONFIG_SOFTMMU)
    else {
        int pd;
        void *map_addr;

        ret = 0;
        /* XXX: incorrect for 4MB pages */
        pd = physpage_find(pte & ~0xfff);
        if ((pd & 0xfff) != 0) {
            /* IO access: no mapping is done as it will be handled by
               the soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            /* map the guest page directly into the host address space */
            map_addr = mmap((void *)virt_addr, page_size, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
            if (map_addr == MAP_FAILED) {
                fprintf(stderr,
                        "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                        pte & ~0xfff, virt_addr);
                exit(1);
            }
#if defined(DEBUG_MMU)
            printf("mmapping 0x%08x to virt 0x%08x pse=%d\n",
                   pte & ~0xfff, virt_addr, (page_size != 4096));
#endif
            page_set_flags(virt_addr, virt_addr + page_size,
                           PAGE_VALID | PAGE_EXEC | prot);
        }
    }
#endif
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
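/* x86 page fault error code layout, as built above in env->error_code:
 *   bit 0 (P) - 0: the page was not present, 1: protection violation
 *   bit 1 (W) - set if the faulting access was a write
 *   bit 2 (U) - set if the fault occurred in user mode */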