/* qemu.git: target-i386/helper.c */
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

/* NOTE: must be called outside the CPU execute loop */
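/* Bring the CPU back to its architectural power-on state: real mode with
   CS.base = 0xffff0000 and EIP = 0xfff0, so the first instruction is
   fetched from the reset vector at physical address 0xfffffff0. */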
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
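/* Dump the full architectural state (general purpose registers, segment
   registers, control/debug registers and, depending on 'flags', the
   condition-code helpers and FPU/SSE state) through 'cpu_fprintf'. */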
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* debug registers are target_ulong: cast so the %08x format matches
           on 64 bit targets as well */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
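/* Emulate the legacy A20 gate: bit 20 of env->a20_mask is set or cleared,
   which invalidates every cached translation, so chained TBs are unlinked
   and the TLB is flushed. */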
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
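/* Install a new CR0 value: flush the TLB when paging-related bits change,
   handle entering/leaving long mode via EFER.LMA, and mirror PE/MP/EM/TS
   into the hflags used by the translator. */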
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
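/* Install a new CR4 value: flush the TLB when PGE/PAE/PSE change and keep
   HF_OSFXSR_MASK in sync with CR4.OSFXSR (forced off if SSE is not
   advertised in CPUID). */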
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
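/* Walk the 32-bit, PAE or long-mode page tables for 'addr', updating the
   accessed/dirty bits as hardware would, and install the translation with
   tlb_set_page().  'is_write1 == 2' denotes an instruction fetch and is
   used for the NX permission check. */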
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
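/* Side-effect-free address translation for the debugger/monitor: walk the
   page tables without touching accessed/dirty bits and without raising a
   fault; returns -1 if the address is not mapped. */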
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
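/* Map a breakpoint programmed in DR0-DR3/DR7 onto QEMU's generic
   breakpoint/watchpoint lists: type 0 is an instruction breakpoint,
   1 a data-write watchpoint, 3 a data read/write watchpoint; type 2
   (I/O breakpoints) is not supported. */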
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
         /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
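/* Recompute the DR6 status bits for the breakpoints/watchpoints that have
   just hit and return whether any enabled one triggered; DR6 is written
   back only in that case or when 'force_dr6_update' is set. */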
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);
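/* Debug exception hook registered with cpu_set_debug_excp_handler(): turns
   hardware breakpoint/watchpoint hits into a #DB exception and then chains
   to the previously installed handler. */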
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
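/* Inject a machine-check event into the given bank.  Uncorrected errors
   raise CPU_INTERRUPT_MCE (or request a system reset if a previous MCE is
   still in progress or CR4.MCE is clear); corrected errors are only
   recorded in the bank registers. */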
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !CONFIG_USER_ONLY */
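/* Advertise MCE support for family >= 6 CPUs that report MCE and MCA in
   CPUID: set the default bank count and mark MCG_CTL and every MCi_CTL as
   fully enabled. */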
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8)&0xf) >= 6
        && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank*4] = ~(uint64_t)0;
    }
}
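/* Fetch a descriptor from the GDT or LDT on behalf of the debugger, using
   cpu_memory_rw_debug() so no faults are generated; returns 1 on success
   and 0 if the selector is outside the table or unreadable. */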
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
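/* Allocate and initialize a new x86 CPU: one-time setup of the flag
   optimization tables and the debug exception hook, then registration of
   the requested CPU model, MCE setup and vcpu creation. */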
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
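/* INIT resets the CPU state but preserves a pending SIPI so the processor
   can still be started afterwards; SIPI delivery itself is handled by the
   APIC.  User-mode emulation provides empty stubs. */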
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif