/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
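
/* Decode the family and model fields of CPUID leaf 1 (env->cpuid_version).
 * The model combines the extended-model nibble (bits 19-16) with the base
 * model nibble (bits 7-4): e.g. a version of 0x000206a7 yields family 6,
 * model 0x2a.  The extended-family field is not folded in here. */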
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
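/* Family 6, model 14 is the 06_0EH processor version referred to above;
 * CPUs from that generation on signal machine checks to all logical
 * processors, so MCE broadcast injection is only permitted there. */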

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
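/* For a flat, accessed 32-bit code segment the output might look like:
 *   CS =0008 00000000 ffffffff 00cf9b00 DPL=0 CS32 [-RA]
 */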

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
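/* With A20 masked (a20_state == 0), bit 20 of every physical address is
 * forced to zero, reproducing the 8086 wrap-around: e.g. 0x00100000 is
 * then treated as 0x00000000. */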

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
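/* The single shift above works because CR0.MP, CR0.EM and CR0.TS occupy
 * consecutive bits 1-3 and HF_MP_SHIFT, HF_EM_SHIFT and HF_TS_SHIFT are
 * defined consecutively in cpu.h, so shifting CR0 left by HF_MP_SHIFT - 1
 * lines all three flags up at once. */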

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(CPU(cpu), 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
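/* PGE, PAE, PSE, SMEP and SMAP all change how page-table entries are
 * interpreted, so toggling any of them invalidates every cached
 * translation; hence the full tlb_flush() above. */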

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK | PG_NX_MASK;
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_notdirty(cs->as, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = ldq_phys(cs->as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_notdirty(cs->as, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = ldl_phys(cs->as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
        goto do_fault_protect;
    }
    switch (mmu_idx) {
    case MMU_USER_IDX:
        if (!(ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    case MMU_KSMAP_IDX:
        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        /* fall through */
    case MMU_KNOSMAP_IDX:
        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    default: /* cannot happen */
        break;
    }
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        stl_phys_notdirty(cs->as, pte_addr, pte);
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK) &&
        !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK))) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
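/* Throughout the function above, is_write1 == 2 denotes an instruction
 * fetch (the softmmu access type), which is why it gates the NX, SMEP
 * and PG_ERROR_I_D_MASK checks. */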

hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
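/* Like the fault path above, but used by the debug interfaces (gdbstub,
 * monitor) to translate a guest-virtual address without setting
 * accessed/dirty bits or raising a fault; -1 means "not mapped". */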

void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
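/* DR6 bits 0-3 record which of the four debug registers matched, even for
 * breakpoints that are not currently enabled in DR7; that is why dr6 is
 * rebuilt from scratch (low nibble cleared) on every check. */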

void breakpoint_handler(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
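/* e1/e2 are the low and high dwords of the 8-byte descriptor: the base is
 * scattered across e1[31:16], e2[7:0] and e2[31:24], the 20-bit limit
 * across e1[15:0] and e2[19:16], and the granularity bit scales the
 * limit to 4 KB units. */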

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif