target-i386: QOM'ify CPU reset
[qemu/ar7.git] / target-i386 / helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
void cpu_state_reset(CPUX86State *env)
{
    cpu_reset(ENV_GET_CPU(env));
}

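/*
 * Decode the display family/model from env->cpuid_version (the EAX
 * value of CPUID leaf 1): bits 11:8 hold the base family, bits 7:4
 * the base model and bits 19:16 the extended model, which is folded
 * into the high nibble of the returned model.  Illustrative example
 * (not from the original source): cpuid_version 0x10676 decodes to
 * family 6, model 0x17.
 */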
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

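/*
 * "06_0EH" is Intel's display_family_display_model notation: family
 * 06H, model 0EH, i.e. exactly the family 6 / model 14 threshold that
 * the check below implements.
 */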
/* Broadcast MCA signal for processor version 06_0EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

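/*
 * Dump the architectural CPU state to f.  General purpose, segment,
 * control and debug registers are always printed; X86_DUMP_CCOP adds
 * the lazy condition code state, X86_DUMP_FPU the FPU/SSE registers,
 * and CPU_DUMP_CODE a window of DUMP_CODE_BYTES_TOTAL code bytes
 * around CS:EIP (starting up to DUMP_CODE_BYTES_BACKWARD bytes before
 * it).
 */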
void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
312 if (flags & X86_DUMP_FPU) {
313 int fptag;
314 fptag = 0;
315 for(i = 0; i < 8; i++) {
316 fptag |= ((!env->fptags[i]) << i);
318 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
319 env->fpuc,
320 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
321 env->fpstt,
322 fptag,
323 env->mxcsr);
324 for(i=0;i<8;i++) {
325 CPU_LDoubleU u;
326 u.d = env->fpregs[i].d;
327 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
328 i, u.l.lower, u.l.upper);
329 if ((i & 1) == 1)
330 cpu_fprintf(f, "\n");
331 else
332 cpu_fprintf(f, " ");
334 if (env->hflags & HF_CS64_MASK)
335 nb = 16;
336 else
337 nb = 8;
338 for(i=0;i<nb;i++) {
339 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
341 env->xmm_regs[i].XMM_L(3),
342 env->xmm_regs[i].XMM_L(2),
343 env->xmm_regs[i].XMM_L(1),
344 env->xmm_regs[i].XMM_L(0));
345 if ((i & 1) == 1)
346 cpu_fprintf(f, "\n");
347 else
348 cpu_fprintf(f, " ");
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

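/*
 * The A20 gate emulates the address wrap of the original PC: with A20
 * disabled, env->a20_mask clears physical address bit 20, so e.g. an
 * access to 0x100000 is aliased down to 0x000000.  The mask is applied
 * to every page table lookup and to the final physical address in the
 * MMU code below.
 */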
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

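/*
 * Besides storing the new value, keep the cached hflags in sync with
 * CR0: changes to PG/WP/PE flush the TLB, setting PG with
 * MSR_EFER_LME enabled enters long mode, and CR0.MP/EM/TS (bits 1-3)
 * are mirrored into HF_MP/HF_EM/HF_TS by the final shift, which lines
 * CR0 bit 1 up with the MP hflag.
 */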
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

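/*
 * The PAE/long-mode walk below slices the virtual address into 9-bit
 * table indices (each level holds 512 8-byte entries per 4 KiB table):
 * bits 47-39 index the PML4, 38-30 the PDPT, 29-21 the page directory
 * and 20-12 the page table.  The legacy 32-bit walk instead uses two
 * 10-bit indices (bits 31-22 and 21-12) into 1024-entry tables of
 * 4-byte entries.
 */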
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;

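    /*
     * Page fault error code layout pushed to the guest (Intel SDM):
     * bit 0 (P) - fault on a present page (protection violation),
     * bit 1 (W) - fault was a write, bit 2 (U) - user-mode access,
     * bit 3 (RSVD) - reserved bit set in a paging entry, and
     * bit 4 (I/D) - instruction fetch, reported only with NXE and PAE.
     */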
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

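/*
 * DR7 encodes one 2-bit type field per debug register (Intel SDM):
 * 00 instruction execution, 01 data writes, 10 I/O accesses and
 * 11 data reads/writes.  hw_breakpoint_type() extracts that field,
 * and the switches below dispatch on it.
 */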
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

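/*
 * Recompute the DR6 B0-B3 hit bits after a debug event: bit n is set
 * when debug register n matched (an EIP match for execution
 * breakpoints, a hit watchpoint for data breakpoints).  DR6 is only
 * written back when an enabled breakpoint hit or when the caller
 * forces an update.
 */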
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

static void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUX86State *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

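/*
 * Each MCE bank occupies four consecutive slots in cenv->mce_banks,
 * mirroring the architectural MSR order IA32_MCi_CTL, IA32_MCi_STATUS,
 * IA32_MCi_ADDR, IA32_MCi_MISC - hence the "4 * params->bank" stride
 * and the banks[0]..banks[3] accesses below.
 */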
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(cenv, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    TranslationBlock *tb;

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        tb = tb_find_pc(env->mem_io_pc);
        cpu_restore_state(tb, env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

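/*
 * Descriptor words as read from the GDT/LDT: e1 is the low dword
 * (limit 15:0, base 15:0), e2 the high dword (base 23:16, type/DPL/P,
 * limit 19:16, flags, base 31:24).  When the granularity bit
 * (DESC_G_MASK) is set the limit is in 4 KiB units, hence the
 * "(*limit << 12) | 0xfff" rescaling below.
 */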
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    X86CPU *cpu;
    CPUX86State *env;
    static int inited;

    cpu = X86_CPU(object_new(TYPE_X86_CPU));
    env = &cpu->env;
    env->cpu_model_str = cpu_model;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        object_delete(OBJECT(cpu));
        return NULL;
    }

    qemu_init_vcpu(env);

    return env;
}

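/*
 * Illustrative call (not from this file): board code typically does
 * something like
 *
 *     CPUX86State *env = cpu_x86_init("qemu64");
 *     if (env == NULL) {
 *         fprintf(stderr, "Unable to find x86 CPU definition\n");
 *         exit(1);
 *     }
 *
 * where "qemu64" is one of the built-in x86 CPU model names.
 */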
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUX86State *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_state_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUX86State *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUX86State *env)
{
}
void do_cpu_sipi(CPUX86State *env)
{
}
#endif