Merge remote-tracking branch 'remotes/maxreitz/tags/pull-block-2020-03-26' into staging
[qemu.git] / target / i386 / helper.c
blobc3a6e4fabe20928cfb94eba8227a757107dbd664
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/qemu-print.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/tcg.h"
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
34 void cpu_sync_bndcs_hflags(CPUX86State *env)
36 uint32_t hflags = env->hflags;
37 uint32_t hflags2 = env->hflags2;
38 uint32_t bndcsr;
40 if ((hflags & HF_CPL_MASK) == 3) {
41 bndcsr = env->bndcs_regs.cfgu;
42 } else {
43 bndcsr = env->msr_bndcfgs;
46 if ((env->cr[4] & CR4_OSXSAVE_MASK)
47 && (env->xcr0 & XSTATE_BNDCSR_MASK)
48 && (bndcsr & BNDCFG_ENABLE)) {
49 hflags |= HF_MPX_EN_MASK;
50 } else {
51 hflags &= ~HF_MPX_EN_MASK;
54 if (bndcsr & BNDCFG_BNDPRESERVE) {
55 hflags2 |= HF2_MPX_PR_MASK;
56 } else {
57 hflags2 &= ~HF2_MPX_PR_MASK;
60 env->hflags = hflags;
61 env->hflags2 = hflags2;
64 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
66 int cpuver = env->cpuid_version;
68 if (family == NULL || model == NULL) {
69 return;
72 *family = (cpuver >> 8) & 0x0f;
73 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
76 /* Broadcast MCA signal for processor version 06H_EH and above */
77 int cpu_x86_support_mca_broadcast(CPUX86State *env)
79 int family = 0;
80 int model = 0;
82 cpu_x86_version(env, &family, &model);
83 if ((family == 6 && model >= 14) || family > 6) {
84 return 1;
87 return 0;
/***********************************************************/
/* x86 debug */
93 static const char *cc_op_str[CC_OP_NB] = {
94 "DYNAMIC",
95 "EFLAGS",
97 "MULB",
98 "MULW",
99 "MULL",
100 "MULQ",
102 "ADDB",
103 "ADDW",
104 "ADDL",
105 "ADDQ",
107 "ADCB",
108 "ADCW",
109 "ADCL",
110 "ADCQ",
112 "SUBB",
113 "SUBW",
114 "SUBL",
115 "SUBQ",
117 "SBBB",
118 "SBBW",
119 "SBBL",
120 "SBBQ",
122 "LOGICB",
123 "LOGICW",
124 "LOGICL",
125 "LOGICQ",
127 "INCB",
128 "INCW",
129 "INCL",
130 "INCQ",
132 "DECB",
133 "DECW",
134 "DECL",
135 "DECQ",
137 "SHLB",
138 "SHLW",
139 "SHLL",
140 "SHLQ",
142 "SARB",
143 "SARW",
144 "SARL",
145 "SARQ",
147 "BMILGB",
148 "BMILGW",
149 "BMILGL",
150 "BMILGQ",
152 "ADCX",
153 "ADOX",
154 "ADCOX",
156 "CLR",
159 static void
160 cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
161 const char *name, struct SegmentCache *sc)
163 #ifdef TARGET_X86_64
164 if (env->hflags & HF_CS64_MASK) {
165 qemu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
166 sc->selector, sc->base, sc->limit,
167 sc->flags & 0x00ffff00);
168 } else
169 #endif
171 qemu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
172 (uint32_t)sc->base, sc->limit,
173 sc->flags & 0x00ffff00);
176 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
177 goto done;
179 qemu_fprintf(f, " DPL=%d ",
180 (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
181 if (sc->flags & DESC_S_MASK) {
182 if (sc->flags & DESC_CS_MASK) {
183 qemu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
184 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
185 qemu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
186 (sc->flags & DESC_R_MASK) ? 'R' : '-');
187 } else {
188 qemu_fprintf(f, (sc->flags & DESC_B_MASK
189 || env->hflags & HF_LMA_MASK)
190 ? "DS " : "DS16");
191 qemu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
192 (sc->flags & DESC_W_MASK) ? 'W' : '-');
194 qemu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
195 } else {
196 static const char *sys_type_name[2][16] = {
197 { /* 32 bit mode */
198 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
199 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
200 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
201 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
203 { /* 64 bit mode */
204 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
205 "Reserved", "Reserved", "Reserved", "Reserved",
206 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
207 "Reserved", "IntGate64", "TrapGate64"
210 qemu_fprintf(f, "%s",
211 sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
212 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
214 done:
215 qemu_fprintf(f, "\n");
218 #ifndef CONFIG_USER_ONLY
220 /* ARRAY_SIZE check is not required because
221 * DeliveryMode(dm) has a size of 3 bit.
223 static inline const char *dm2str(uint32_t dm)
225 static const char *str[] = {
226 "Fixed",
227 "...",
228 "SMI",
229 "...",
230 "NMI",
231 "INIT",
232 "...",
233 "ExtINT"
235 return str[dm];
238 static void dump_apic_lvt(const char *name, uint32_t lvt, bool is_timer)
240 uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
241 qemu_printf("%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
242 name, lvt,
243 lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
244 lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
245 lvt & APIC_LVT_MASKED ? "masked" : "",
246 lvt & APIC_LVT_DELIV_STS ? "pending" : "",
247 !is_timer ?
248 "" : lvt & APIC_LVT_TIMER_PERIODIC ?
249 "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
250 "tsc-deadline" : "one-shot",
251 dm2str(dm));
252 if (dm != APIC_DM_NMI) {
253 qemu_printf(" (vec %u)\n", lvt & APIC_VECTOR_MASK);
254 } else {
255 qemu_printf("\n");
259 /* ARRAY_SIZE check is not required because
260 * destination shorthand has a size of 2 bit.
262 static inline const char *shorthand2str(uint32_t shorthand)
264 const char *str[] = {
265 "no-shorthand", "self", "all-self", "all"
267 return str[shorthand];
270 static inline uint8_t divider_conf(uint32_t divide_conf)
272 uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);
274 return divide_val == 7 ? 1 : 2 << divide_val;
277 static inline void mask2str(char *str, uint32_t val, uint8_t size)
279 while (size--) {
280 *str++ = (val >> size) & 1 ? '1' : '0';
282 *str = 0;
285 #define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
287 static void dump_apic_icr(APICCommonState *s, CPUX86State *env)
289 uint32_t icr = s->icr[0], icr2 = s->icr[1];
290 uint8_t dest_shorthand = \
291 (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
292 bool logical_mod = icr & APIC_ICR_DEST_MOD;
293 char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
294 uint32_t dest_field;
295 bool x2apic;
297 qemu_printf("ICR\t 0x%08x %s %s %s %s\n",
298 icr,
299 logical_mod ? "logical" : "physical",
300 icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
301 icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
302 shorthand2str(dest_shorthand));
304 qemu_printf("ICR2\t 0x%08x", icr2);
305 if (dest_shorthand != 0) {
306 qemu_printf("\n");
307 return;
309 x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
310 dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;
312 if (!logical_mod) {
313 if (x2apic) {
314 qemu_printf(" cpu %u (X2APIC ID)\n", dest_field);
315 } else {
316 qemu_printf(" cpu %u (APIC ID)\n",
317 dest_field & APIC_LOGDEST_XAPIC_ID);
319 return;
322 if (s->dest_mode == 0xf) { /* flat mode */
323 mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
324 qemu_printf(" mask %s (APIC ID)\n", apic_id_str);
325 } else if (s->dest_mode == 0) { /* cluster mode */
326 if (x2apic) {
327 mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
328 qemu_printf(" cluster %u mask %s (X2APIC ID)\n",
329 dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
330 } else {
331 mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
332 qemu_printf(" cluster %u mask %s (APIC ID)\n",
333 dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
338 static void dump_apic_interrupt(const char *name, uint32_t *ireg_tab,
339 uint32_t *tmr_tab)
341 int i, empty = true;
343 qemu_printf("%s\t ", name);
344 for (i = 0; i < 256; i++) {
345 if (apic_get_bit(ireg_tab, i)) {
346 qemu_printf("%u%s ", i,
347 apic_get_bit(tmr_tab, i) ? "(level)" : "");
348 empty = false;
351 qemu_printf("%s\n", empty ? "(none)" : "");
354 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
356 X86CPU *cpu = X86_CPU(cs);
357 APICCommonState *s = APIC_COMMON(cpu->apic_state);
358 if (!s) {
359 qemu_printf("local apic state not available\n");
360 return;
362 uint32_t *lvt = s->lvt;
364 qemu_printf("dumping local APIC state for CPU %-2u\n\n",
365 CPU(cpu)->cpu_index);
366 dump_apic_lvt("LVT0", lvt[APIC_LVT_LINT0], false);
367 dump_apic_lvt("LVT1", lvt[APIC_LVT_LINT1], false);
368 dump_apic_lvt("LVTPC", lvt[APIC_LVT_PERFORM], false);
369 dump_apic_lvt("LVTERR", lvt[APIC_LVT_ERROR], false);
370 dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
371 dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);
373 qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
374 s->divide_conf & APIC_DCR_MASK,
375 divider_conf(s->divide_conf),
376 s->initial_count);
378 qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
379 s->spurious_vec,
380 s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
381 s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
382 s->spurious_vec & APIC_VECTOR_MASK);
384 dump_apic_icr(s, &cpu->env);
386 qemu_printf("ESR\t 0x%08x\n", s->esr);
388 dump_apic_interrupt("ISR", s->isr, s->tmr);
389 dump_apic_interrupt("IRR", s->irr, s->tmr);
391 qemu_printf("\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
392 s->arb_id, s->tpr, s->dest_mode, s->log_dest);
393 if (s->dest_mode == 0) {
394 qemu_printf("(cluster %u: id %u)",
395 s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
396 s->log_dest & APIC_LOGDEST_XAPIC_ID);
398 qemu_printf(" PPR 0x%02x\n", apic_get_ppr(s));
400 #else
401 void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
404 #endif /* !CONFIG_USER_ONLY */
406 #define DUMP_CODE_BYTES_TOTAL 50
407 #define DUMP_CODE_BYTES_BACKWARD 20
409 void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
411 X86CPU *cpu = X86_CPU(cs);
412 CPUX86State *env = &cpu->env;
413 int eflags, i, nb;
414 char cc_op_name[32];
415 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
417 eflags = cpu_compute_eflags(env);
418 #ifdef TARGET_X86_64
419 if (env->hflags & HF_CS64_MASK) {
420 qemu_fprintf(f, "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
421 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
422 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
423 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
424 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
425 env->regs[R_EAX],
426 env->regs[R_EBX],
427 env->regs[R_ECX],
428 env->regs[R_EDX],
429 env->regs[R_ESI],
430 env->regs[R_EDI],
431 env->regs[R_EBP],
432 env->regs[R_ESP],
433 env->regs[8],
434 env->regs[9],
435 env->regs[10],
436 env->regs[11],
437 env->regs[12],
438 env->regs[13],
439 env->regs[14],
440 env->regs[15],
441 env->eip, eflags,
442 eflags & DF_MASK ? 'D' : '-',
443 eflags & CC_O ? 'O' : '-',
444 eflags & CC_S ? 'S' : '-',
445 eflags & CC_Z ? 'Z' : '-',
446 eflags & CC_A ? 'A' : '-',
447 eflags & CC_P ? 'P' : '-',
448 eflags & CC_C ? 'C' : '-',
449 env->hflags & HF_CPL_MASK,
450 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
451 (env->a20_mask >> 20) & 1,
452 (env->hflags >> HF_SMM_SHIFT) & 1,
453 cs->halted);
454 } else
455 #endif
457 qemu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
458 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
459 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
460 (uint32_t)env->regs[R_EAX],
461 (uint32_t)env->regs[R_EBX],
462 (uint32_t)env->regs[R_ECX],
463 (uint32_t)env->regs[R_EDX],
464 (uint32_t)env->regs[R_ESI],
465 (uint32_t)env->regs[R_EDI],
466 (uint32_t)env->regs[R_EBP],
467 (uint32_t)env->regs[R_ESP],
468 (uint32_t)env->eip, eflags,
469 eflags & DF_MASK ? 'D' : '-',
470 eflags & CC_O ? 'O' : '-',
471 eflags & CC_S ? 'S' : '-',
472 eflags & CC_Z ? 'Z' : '-',
473 eflags & CC_A ? 'A' : '-',
474 eflags & CC_P ? 'P' : '-',
475 eflags & CC_C ? 'C' : '-',
476 env->hflags & HF_CPL_MASK,
477 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
478 (env->a20_mask >> 20) & 1,
479 (env->hflags >> HF_SMM_SHIFT) & 1,
480 cs->halted);
483 for(i = 0; i < 6; i++) {
484 cpu_x86_dump_seg_cache(env, f, seg_name[i], &env->segs[i]);
486 cpu_x86_dump_seg_cache(env, f, "LDT", &env->ldt);
487 cpu_x86_dump_seg_cache(env, f, "TR", &env->tr);
489 #ifdef TARGET_X86_64
490 if (env->hflags & HF_LMA_MASK) {
491 qemu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
492 env->gdt.base, env->gdt.limit);
493 qemu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
494 env->idt.base, env->idt.limit);
495 qemu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
496 (uint32_t)env->cr[0],
497 env->cr[2],
498 env->cr[3],
499 (uint32_t)env->cr[4]);
500 for(i = 0; i < 4; i++)
501 qemu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
502 qemu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
503 env->dr[6], env->dr[7]);
504 } else
505 #endif
507 qemu_fprintf(f, "GDT= %08x %08x\n",
508 (uint32_t)env->gdt.base, env->gdt.limit);
509 qemu_fprintf(f, "IDT= %08x %08x\n",
510 (uint32_t)env->idt.base, env->idt.limit);
511 qemu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
512 (uint32_t)env->cr[0],
513 (uint32_t)env->cr[2],
514 (uint32_t)env->cr[3],
515 (uint32_t)env->cr[4]);
516 for(i = 0; i < 4; i++) {
517 qemu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
519 qemu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
520 env->dr[6], env->dr[7]);
522 if (flags & CPU_DUMP_CCOP) {
523 if ((unsigned)env->cc_op < CC_OP_NB)
524 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
525 else
526 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
527 #ifdef TARGET_X86_64
528 if (env->hflags & HF_CS64_MASK) {
529 qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
530 env->cc_src, env->cc_dst,
531 cc_op_name);
532 } else
533 #endif
535 qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
536 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
537 cc_op_name);
540 qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
541 if (flags & CPU_DUMP_FPU) {
542 int fptag;
543 fptag = 0;
544 for(i = 0; i < 8; i++) {
545 fptag |= ((!env->fptags[i]) << i);
547 qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
548 env->fpuc,
549 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
550 env->fpstt,
551 fptag,
552 env->mxcsr);
553 for(i=0;i<8;i++) {
554 CPU_LDoubleU u;
555 u.d = env->fpregs[i].d;
556 qemu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
557 i, u.l.lower, u.l.upper);
558 if ((i & 1) == 1)
559 qemu_fprintf(f, "\n");
560 else
561 qemu_fprintf(f, " ");
563 if (env->hflags & HF_CS64_MASK)
564 nb = 16;
565 else
566 nb = 8;
567 for(i=0;i<nb;i++) {
568 qemu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
570 env->xmm_regs[i].ZMM_L(3),
571 env->xmm_regs[i].ZMM_L(2),
572 env->xmm_regs[i].ZMM_L(1),
573 env->xmm_regs[i].ZMM_L(0));
574 if ((i & 1) == 1)
575 qemu_fprintf(f, "\n");
576 else
577 qemu_fprintf(f, " ");
580 if (flags & CPU_DUMP_CODE) {
581 target_ulong base = env->segs[R_CS].base + env->eip;
582 target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
583 uint8_t code;
584 char codestr[3];
586 qemu_fprintf(f, "Code=");
587 for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
588 if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
589 snprintf(codestr, sizeof(codestr), "%02x", code);
590 } else {
591 snprintf(codestr, sizeof(codestr), "??");
593 qemu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
594 i == offs ? "<" : "", codestr, i == offs ? ">" : "");
596 qemu_fprintf(f, "\n");
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
604 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
606 CPUX86State *env = &cpu->env;
608 a20_state = (a20_state != 0);
609 if (a20_state != ((env->a20_mask >> 20) & 1)) {
610 CPUState *cs = CPU(cpu);
612 qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
613 /* if the cpu is currently executing code, we must unlink it and
614 all the potentially executing TB */
615 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
617 /* when a20 is changed, all the MMU mappings are invalid, so
618 we must flush everything */
619 tlb_flush(cs);
620 env->a20_mask = ~(1 << 20) | (a20_state << 20);
624 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
626 X86CPU *cpu = env_archcpu(env);
627 int pe_state;
629 qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
630 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
631 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
632 tlb_flush(CPU(cpu));
635 #ifdef TARGET_X86_64
636 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
637 (env->efer & MSR_EFER_LME)) {
638 /* enter in long mode */
639 /* XXX: generate an exception */
640 if (!(env->cr[4] & CR4_PAE_MASK))
641 return;
642 env->efer |= MSR_EFER_LMA;
643 env->hflags |= HF_LMA_MASK;
644 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
645 (env->efer & MSR_EFER_LMA)) {
646 /* exit long mode */
647 env->efer &= ~MSR_EFER_LMA;
648 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
649 env->eip &= 0xffffffff;
651 #endif
652 env->cr[0] = new_cr0 | CR0_ET_MASK;
654 /* update PE flag in hidden flags */
655 pe_state = (env->cr[0] & CR0_PE_MASK);
656 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
657 /* ensure that ADDSEG is always set in real mode */
658 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
659 /* update FPU flags */
660 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
661 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
664 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
665 the PDPT */
666 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
668 env->cr[3] = new_cr3;
669 if (env->cr[0] & CR0_PG_MASK) {
670 qemu_log_mask(CPU_LOG_MMU,
671 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
672 tlb_flush(env_cpu(env));
676 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
678 uint32_t hflags;
680 #if defined(DEBUG_MMU)
681 printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
682 #endif
683 if ((new_cr4 ^ env->cr[4]) &
684 (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
685 CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
686 tlb_flush(env_cpu(env));
689 /* Clear bits we're going to recompute. */
690 hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
692 /* SSE handling */
693 if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
694 new_cr4 &= ~CR4_OSFXSR_MASK;
696 if (new_cr4 & CR4_OSFXSR_MASK) {
697 hflags |= HF_OSFXSR_MASK;
700 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
701 new_cr4 &= ~CR4_SMAP_MASK;
703 if (new_cr4 & CR4_SMAP_MASK) {
704 hflags |= HF_SMAP_MASK;
707 if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
708 new_cr4 &= ~CR4_PKE_MASK;
711 env->cr[4] = new_cr4;
712 env->hflags = hflags;
714 cpu_sync_bndcs_hflags(env);
717 #if !defined(CONFIG_USER_ONLY)
718 hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
719 MemTxAttrs *attrs)
721 X86CPU *cpu = X86_CPU(cs);
722 CPUX86State *env = &cpu->env;
723 target_ulong pde_addr, pte_addr;
724 uint64_t pte;
725 int32_t a20_mask;
726 uint32_t page_offset;
727 int page_size;
729 *attrs = cpu_get_mem_attrs(env);
731 a20_mask = x86_get_a20_mask(env);
732 if (!(env->cr[0] & CR0_PG_MASK)) {
733 pte = addr & a20_mask;
734 page_size = 4096;
735 } else if (env->cr[4] & CR4_PAE_MASK) {
736 target_ulong pdpe_addr;
737 uint64_t pde, pdpe;
739 #ifdef TARGET_X86_64
740 if (env->hflags & HF_LMA_MASK) {
741 bool la57 = env->cr[4] & CR4_LA57_MASK;
742 uint64_t pml5e_addr, pml5e;
743 uint64_t pml4e_addr, pml4e;
744 int32_t sext;
746 /* test virtual address sign extension */
747 sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
748 if (sext != 0 && sext != -1) {
749 return -1;
752 if (la57) {
753 pml5e_addr = ((env->cr[3] & ~0xfff) +
754 (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
755 pml5e = x86_ldq_phys(cs, pml5e_addr);
756 if (!(pml5e & PG_PRESENT_MASK)) {
757 return -1;
759 } else {
760 pml5e = env->cr[3];
763 pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
764 (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
765 pml4e = x86_ldq_phys(cs, pml4e_addr);
766 if (!(pml4e & PG_PRESENT_MASK)) {
767 return -1;
769 pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
770 (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
771 pdpe = x86_ldq_phys(cs, pdpe_addr);
772 if (!(pdpe & PG_PRESENT_MASK)) {
773 return -1;
775 if (pdpe & PG_PSE_MASK) {
776 page_size = 1024 * 1024 * 1024;
777 pte = pdpe;
778 goto out;
781 } else
782 #endif
784 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
785 a20_mask;
786 pdpe = x86_ldq_phys(cs, pdpe_addr);
787 if (!(pdpe & PG_PRESENT_MASK))
788 return -1;
791 pde_addr = ((pdpe & PG_ADDRESS_MASK) +
792 (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
793 pde = x86_ldq_phys(cs, pde_addr);
794 if (!(pde & PG_PRESENT_MASK)) {
795 return -1;
797 if (pde & PG_PSE_MASK) {
798 /* 2 MB page */
799 page_size = 2048 * 1024;
800 pte = pde;
801 } else {
802 /* 4 KB page */
803 pte_addr = ((pde & PG_ADDRESS_MASK) +
804 (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
805 page_size = 4096;
806 pte = x86_ldq_phys(cs, pte_addr);
808 if (!(pte & PG_PRESENT_MASK)) {
809 return -1;
811 } else {
812 uint32_t pde;
814 /* page directory entry */
815 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
816 pde = x86_ldl_phys(cs, pde_addr);
817 if (!(pde & PG_PRESENT_MASK))
818 return -1;
819 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
820 pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
821 page_size = 4096 * 1024;
822 } else {
823 /* page directory entry */
824 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
825 pte = x86_ldl_phys(cs, pte_addr);
826 if (!(pte & PG_PRESENT_MASK)) {
827 return -1;
829 page_size = 4096;
831 pte = pte & a20_mask;
834 #ifdef TARGET_X86_64
835 out:
836 #endif
837 pte &= PG_ADDRESS_MASK & ~(page_size - 1);
838 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
839 return pte | page_offset;
842 typedef struct MCEInjectionParams {
843 Monitor *mon;
844 int bank;
845 uint64_t status;
846 uint64_t mcg_status;
847 uint64_t addr;
848 uint64_t misc;
849 int flags;
850 } MCEInjectionParams;
852 static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
854 MCEInjectionParams *params = data.host_ptr;
855 X86CPU *cpu = X86_CPU(cs);
856 CPUX86State *cenv = &cpu->env;
857 uint64_t *banks = cenv->mce_banks + 4 * params->bank;
859 cpu_synchronize_state(cs);
862 * If there is an MCE exception being processed, ignore this SRAO MCE
863 * unless unconditional injection was requested.
865 if (!(params->flags & MCE_INJECT_UNCOND_AO)
866 && !(params->status & MCI_STATUS_AR)
867 && (cenv->mcg_status & MCG_STATUS_MCIP)) {
868 return;
871 if (params->status & MCI_STATUS_UC) {
873 * if MSR_MCG_CTL is not all 1s, the uncorrected error
874 * reporting is disabled
876 if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
877 monitor_printf(params->mon,
878 "CPU %d: Uncorrected error reporting disabled\n",
879 cs->cpu_index);
880 return;
884 * if MSR_MCi_CTL is not all 1s, the uncorrected error
885 * reporting is disabled for the bank
887 if (banks[0] != ~(uint64_t)0) {
888 monitor_printf(params->mon,
889 "CPU %d: Uncorrected error reporting disabled for"
890 " bank %d\n",
891 cs->cpu_index, params->bank);
892 return;
895 if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
896 !(cenv->cr[4] & CR4_MCE_MASK)) {
897 monitor_printf(params->mon,
898 "CPU %d: Previous MCE still in progress, raising"
899 " triple fault\n",
900 cs->cpu_index);
901 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
902 qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
903 return;
905 if (banks[1] & MCI_STATUS_VAL) {
906 params->status |= MCI_STATUS_OVER;
908 banks[2] = params->addr;
909 banks[3] = params->misc;
910 cenv->mcg_status = params->mcg_status;
911 banks[1] = params->status;
912 cpu_interrupt(cs, CPU_INTERRUPT_MCE);
913 } else if (!(banks[1] & MCI_STATUS_VAL)
914 || !(banks[1] & MCI_STATUS_UC)) {
915 if (banks[1] & MCI_STATUS_VAL) {
916 params->status |= MCI_STATUS_OVER;
918 banks[2] = params->addr;
919 banks[3] = params->misc;
920 banks[1] = params->status;
921 } else {
922 banks[1] |= MCI_STATUS_OVER;
926 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
927 uint64_t status, uint64_t mcg_status, uint64_t addr,
928 uint64_t misc, int flags)
930 CPUState *cs = CPU(cpu);
931 CPUX86State *cenv = &cpu->env;
932 MCEInjectionParams params = {
933 .mon = mon,
934 .bank = bank,
935 .status = status,
936 .mcg_status = mcg_status,
937 .addr = addr,
938 .misc = misc,
939 .flags = flags,
941 unsigned bank_num = cenv->mcg_cap & 0xff;
943 if (!cenv->mcg_cap) {
944 monitor_printf(mon, "MCE injection not supported\n");
945 return;
947 if (bank >= bank_num) {
948 monitor_printf(mon, "Invalid MCE bank number\n");
949 return;
951 if (!(status & MCI_STATUS_VAL)) {
952 monitor_printf(mon, "Invalid MCE status code\n");
953 return;
955 if ((flags & MCE_INJECT_BROADCAST)
956 && !cpu_x86_support_mca_broadcast(cenv)) {
957 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
958 return;
961 run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
962 if (flags & MCE_INJECT_BROADCAST) {
963 CPUState *other_cs;
965 params.bank = 1;
966 params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
967 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
968 params.addr = 0;
969 params.misc = 0;
970 CPU_FOREACH(other_cs) {
971 if (other_cs == cs) {
972 continue;
974 run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
979 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
981 X86CPU *cpu = env_archcpu(env);
982 CPUState *cs = env_cpu(env);
984 if (kvm_enabled() || whpx_enabled()) {
985 env->tpr_access_type = access;
987 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
988 } else if (tcg_enabled()) {
989 cpu_restore_state(cs, cs->mem_io_pc, false);
991 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
994 #endif /* !CONFIG_USER_ONLY */
996 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
997 target_ulong *base, unsigned int *limit,
998 unsigned int *flags)
1000 CPUState *cs = env_cpu(env);
1001 SegmentCache *dt;
1002 target_ulong ptr;
1003 uint32_t e1, e2;
1004 int index;
1006 if (selector & 0x4)
1007 dt = &env->ldt;
1008 else
1009 dt = &env->gdt;
1010 index = selector & ~7;
1011 ptr = dt->base + index;
1012 if ((index + 7) > dt->limit
1013 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1014 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1015 return 0;
1017 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1018 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1019 if (e2 & DESC_G_MASK)
1020 *limit = (*limit << 12) | 0xfff;
1021 *flags = e2;
1023 return 1;
1026 #if !defined(CONFIG_USER_ONLY)
1027 void do_cpu_init(X86CPU *cpu)
1029 CPUState *cs = CPU(cpu);
1030 CPUX86State *env = &cpu->env;
1031 CPUX86State *save = g_new(CPUX86State, 1);
1032 int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
1034 *save = *env;
1036 cpu_reset(cs);
1037 cs->interrupt_request = sipi;
1038 memcpy(&env->start_init_save, &save->start_init_save,
1039 offsetof(CPUX86State, end_init_save) -
1040 offsetof(CPUX86State, start_init_save));
1041 g_free(save);
1043 if (kvm_enabled()) {
1044 kvm_arch_do_init_vcpu(cpu);
1046 apic_init_reset(cpu->apic_state);
1049 void do_cpu_sipi(X86CPU *cpu)
1051 apic_sipi(cpu->apic_state);
1053 #else
1054 void do_cpu_init(X86CPU *cpu)
1057 void do_cpu_sipi(X86CPU *cpu)
1060 #endif
/* Frob eflags into and out of the CPU temporary format. */
1064 void x86_cpu_exec_enter(CPUState *cs)
1066 X86CPU *cpu = X86_CPU(cs);
1067 CPUX86State *env = &cpu->env;
1069 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1070 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
1071 CC_OP = CC_OP_EFLAGS;
1072 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1075 void x86_cpu_exec_exit(CPUState *cs)
1077 X86CPU *cpu = X86_CPU(cs);
1078 CPUX86State *env = &cpu->env;
1080 env->eflags = cpu_compute_eflags(env);
1083 #ifndef CONFIG_USER_ONLY
1084 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
1086 X86CPU *cpu = X86_CPU(cs);
1087 CPUX86State *env = &cpu->env;
1088 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1089 AddressSpace *as = cpu_addressspace(cs, attrs);
1091 return address_space_ldub(as, addr, attrs, NULL);
1094 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
1096 X86CPU *cpu = X86_CPU(cs);
1097 CPUX86State *env = &cpu->env;
1098 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1099 AddressSpace *as = cpu_addressspace(cs, attrs);
1101 return address_space_lduw(as, addr, attrs, NULL);
1104 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
1106 X86CPU *cpu = X86_CPU(cs);
1107 CPUX86State *env = &cpu->env;
1108 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1109 AddressSpace *as = cpu_addressspace(cs, attrs);
1111 return address_space_ldl(as, addr, attrs, NULL);
1114 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
1116 X86CPU *cpu = X86_CPU(cs);
1117 CPUX86State *env = &cpu->env;
1118 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1119 AddressSpace *as = cpu_addressspace(cs, attrs);
1121 return address_space_ldq(as, addr, attrs, NULL);
1124 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
1126 X86CPU *cpu = X86_CPU(cs);
1127 CPUX86State *env = &cpu->env;
1128 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1129 AddressSpace *as = cpu_addressspace(cs, attrs);
1131 address_space_stb(as, addr, val, attrs, NULL);
1134 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
1136 X86CPU *cpu = X86_CPU(cs);
1137 CPUX86State *env = &cpu->env;
1138 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1139 AddressSpace *as = cpu_addressspace(cs, attrs);
1141 address_space_stl_notdirty(as, addr, val, attrs, NULL);
1144 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
1146 X86CPU *cpu = X86_CPU(cs);
1147 CPUX86State *env = &cpu->env;
1148 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1149 AddressSpace *as = cpu_addressspace(cs, attrs);
1151 address_space_stw(as, addr, val, attrs, NULL);
1154 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
1156 X86CPU *cpu = X86_CPU(cs);
1157 CPUX86State *env = &cpu->env;
1158 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1159 AddressSpace *as = cpu_addressspace(cs, attrs);
1161 address_space_stl(as, addr, val, attrs, NULL);
1164 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
1166 X86CPU *cpu = X86_CPU(cs);
1167 CPUX86State *env = &cpu->env;
1168 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1169 AddressSpace *as = cpu_addressspace(cs, attrs);
1171 address_space_stq(as, addr, val, attrs, NULL);
1173 #endif