[qemu/qemu_0_9_1_stable.git] / target-i386 / helper2.c

/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext3_features = CPUID_EXT3_SVM;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0x8000000e;
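        /* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
           processor brand string.  The loop below packs the 48-character
           model_id into env->cpuid_model[] little-endian, four characters
           per 32-bit word, NUL-padded to the full 48 bytes. */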
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
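
    /* As on real hardware, CS starts with base 0xffff0000 and EIP 0xfff0,
       so the first instruction is fetched from the reset vector at
       physical address 0xfffffff0 (0xffff0000 + 0xfff0). */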
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
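
/* Usage sketch (illustrative only, not part of the original code): a
   typical caller allocates a CPU, then dumps and finally frees it.

       CPUX86State *env = cpu_x86_init();
       if (env) {
           cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
           cpu_x86_close(env);
       }

   cpu_x86_init() already calls cpu_reset(), so an explicit reset is only
   needed to re-initialize a CPU that has been running. */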

/***********************************************************/
/* x86 debug */
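
/* NOTE: cpu_dump_state() indexes this table directly with env->cc_op,
   so the order of the strings must match the CC_OP_xxx enum in cpu.h. */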
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
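
    /* QEMU evaluates EFLAGS lazily: cc_op records which operation last set
       the flags, and cc_src/cc_dst hold its operands, from which the
       arithmetic flags are recomputed on demand. */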
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
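
    /* env->fptags[i] is 1 when FP register i is empty; the architectural
       tag word bit is 1 when the register is valid, hence the inversion
       when building FTW below. */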
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
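
/* The A20 line masks physical address bit 20 for 8086 address wrap-around
   compatibility.  env->a20_mask is AND-ed into every physical address
   during translation, so toggling it invalidates all cached mappings. */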
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
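    /* CR0.MP, CR0.EM and CR0.TS occupy consecutive bits 1..3, and
       HF_MP/HF_EM/HF_TS are likewise consecutive in hflags, so a single
       shift by (HF_MP_SHIFT - 1) transfers all three bits at once. */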
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0 = nothing more to do
   1 = generate PF fault
   2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
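
    /* With paging disabled, linear addresses map 1:1 onto physical
       addresses and every access is allowed. */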
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
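            /* XOR-ing an entry with PG_NX_MASK turns the execute-disable
               bit into an "executable" bit, so NX can be accumulated
               across levels with AND exactly like USER and RW; a final
               XOR restores the NX sense. */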
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
674 #endif
676 /* XXX: load them when cr3 is loaded ? */
677 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
678 env->a20_mask;
679 pdpe = ldq_phys(pdpe_addr);
680 if (!(pdpe & PG_PRESENT_MASK)) {
681 error_code = 0;
682 goto do_fault;
684 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
687 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
688 env->a20_mask;
689 pde = ldq_phys(pde_addr);
690 if (!(pde & PG_PRESENT_MASK)) {
691 error_code = 0;
692 goto do_fault;
694 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
695 error_code = PG_ERROR_RSVD_MASK;
696 goto do_fault;
698 ptep &= pde ^ PG_NX_MASK;
699 if (pde & PG_PSE_MASK) {
700 /* 2 MB page */
701 page_size = 2048 * 1024;
702 ptep ^= PG_NX_MASK;
703 if ((ptep & PG_NX_MASK) && is_write1 == 2)
704 goto do_fault_protect;
705 if (is_user) {
706 if (!(ptep & PG_USER_MASK))
707 goto do_fault_protect;
708 if (is_write && !(ptep & PG_RW_MASK))
709 goto do_fault_protect;
710 } else {
711 if ((env->cr[0] & CR0_WP_MASK) &&
712 is_write && !(ptep & PG_RW_MASK))
713 goto do_fault_protect;
715 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
716 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
717 pde |= PG_ACCESSED_MASK;
718 if (is_dirty)
719 pde |= PG_DIRTY_MASK;
720 stl_phys_notdirty(pde_addr, pde);
722 /* align to page_size */
723 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
724 virt_addr = addr & ~(page_size - 1);
725 } else {
726 /* 4 KB page */
727 if (!(pde & PG_ACCESSED_MASK)) {
728 pde |= PG_ACCESSED_MASK;
729 stl_phys_notdirty(pde_addr, pde);
731 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
732 env->a20_mask;
733 pte = ldq_phys(pte_addr);
734 if (!(pte & PG_PRESENT_MASK)) {
735 error_code = 0;
736 goto do_fault;
738 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
739 error_code = PG_ERROR_RSVD_MASK;
740 goto do_fault;
742 /* combine pde and pte nx, user and rw protections */
743 ptep &= pte ^ PG_NX_MASK;
744 ptep ^= PG_NX_MASK;
745 if ((ptep & PG_NX_MASK) && is_write1 == 2)
746 goto do_fault_protect;
747 if (is_user) {
748 if (!(ptep & PG_USER_MASK))
749 goto do_fault_protect;
750 if (is_write && !(ptep & PG_RW_MASK))
751 goto do_fault_protect;
752 } else {
753 if ((env->cr[0] & CR0_WP_MASK) &&
754 is_write && !(ptep & PG_RW_MASK))
755 goto do_fault_protect;
757 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
758 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
759 pte |= PG_ACCESSED_MASK;
760 if (is_dirty)
761 pte |= PG_DIRTY_MASK;
762 stl_phys_notdirty(pte_addr, pte);
764 page_size = 4096;
765 virt_addr = addr & ~0xfff;
766 pte = pte & (PHYS_ADDR_MASK | 0xfff);
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
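
    /* Build the architectural #PF error code: bit 0 = protection violation
       (vs. not-present), bit 1 = write access, bit 2 = user mode,
       bit 3 = reserved bit set, bit 4 = instruction fetch. */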
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}
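
/* Walk the page tables without setting accessed/dirty bits; used for
   debug accesses (gdb stub, monitor) to translate a virtual address.
   Returns -1 if the address is not mapped. */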
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
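/* Memory image produced by the 32-bit FSAVE instruction: the control,
   status and tag words are each stored in a 32-bit slot (hence the dummy
   padding fields), followed by the instruction/operand pointers and the
   eight 10-byte extended-precision registers. */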
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* 11b marks the register as empty */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the memory image, so it is an input operand */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the memory image, so it is an output operand */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif