/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/qemu-print.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
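
/*
 * Recompute the MPX-related bits in hflags/hflags2 from the currently
 * active BNDCFG register (BNDCFGU at CPL 3, MSR_BNDCFGS otherwise)
 * together with CR4.OSXSAVE and XCR0.
 */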
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}
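
/*
 * Decode the family/model fields of the CPUID version word: family is
 * bits 11..8, model combines the extended model (bits 19..16) with the
 * base model (bits 7..4).
 */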
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */
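
/*
 * Human-readable names for the CC_OP_* values; indexed by env->cc_op,
 * so the order must match the enum in cpu.h.
 */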
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        qemu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                     sc->selector, sc->base, sc->limit,
                     sc->flags & 0x00ffff00);
    } else
#endif
    {
        qemu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                     (uint32_t)sc->base, sc->limit,
                     sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    qemu_fprintf(f, " DPL=%d ",
                 (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            qemu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                         ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            qemu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                         (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            qemu_fprintf(f, (sc->flags & DESC_B_MASK
                             || env->hflags & HF_LMA_MASK)
                         ? "DS  " : "DS16");
            qemu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                         (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        qemu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        qemu_fprintf(f, "%s",
                     sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                  [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    qemu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bit.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

static void dump_apic_lvt(const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    qemu_printf("%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        qemu_printf(" (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        qemu_printf("\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bit.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
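
/*
 * The APIC timer divide value is packed into bits 0, 1 and 3 of the
 * divide configuration register: a value of 7 means divide by 1, any
 * other value divides by 2^(value + 1), e.g. DCR 0x0 -> divide by 2.
 */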
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}
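
/*
 * Write the low 'size' bits of 'val' into 'str' as '0'/'1' characters,
 * most significant bit first; 'str' must have room for size + 1 bytes.
 */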
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

static void dump_apic_icr(APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    qemu_printf("ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    qemu_printf("ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        qemu_printf("\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            qemu_printf(" cpu %u (X2APIC ID)\n", dest_field);
        } else {
            qemu_printf(" cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        qemu_printf(" mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            qemu_printf(" cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            qemu_printf(" cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
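
/*
 * ireg_tab is a 256-bit ISR/IRR vector stored as eight 32-bit words;
 * tmr_tab is the matching TMR, which marks level-triggered vectors.
 */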
static void dump_apic_interrupt(const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    qemu_printf("%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            qemu_printf("%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    qemu_printf("%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    if (!s) {
        qemu_printf("local apic state not available\n");
        return;
    }
    uint32_t *lvt = s->lvt;

    qemu_printf("dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt("LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt("LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt("LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt("LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt("LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt("LVTT", lvt[APIC_LVT_TIMER], true);

    qemu_printf("Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    qemu_printf("SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(s, &cpu->env);

    qemu_printf("ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt("ISR", s->isr, s->tmr);
    dump_apic_interrupt("IRR", s->irr, s->tmr);

    qemu_printf("\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        qemu_printf("(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    qemu_printf(" PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        qemu_fprintf(f, "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                     "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                     "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                     "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                     "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                     env->regs[R_EAX],
                     env->regs[R_EBX],
                     env->regs[R_ECX],
                     env->regs[R_EDX],
                     env->regs[R_ESI],
                     env->regs[R_EDI],
                     env->regs[R_EBP],
                     env->regs[R_ESP],
                     env->regs[8],
                     env->regs[9],
                     env->regs[10],
                     env->regs[11],
                     env->regs[12],
                     env->regs[13],
                     env->regs[14],
                     env->regs[15],
                     env->eip, eflags,
                     eflags & DF_MASK ? 'D' : '-',
                     eflags & CC_O ? 'O' : '-',
                     eflags & CC_S ? 'S' : '-',
                     eflags & CC_Z ? 'Z' : '-',
                     eflags & CC_A ? 'A' : '-',
                     eflags & CC_P ? 'P' : '-',
                     eflags & CC_C ? 'C' : '-',
                     env->hflags & HF_CPL_MASK,
                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                     (env->a20_mask >> 20) & 1,
                     (env->hflags >> HF_SMM_SHIFT) & 1,
                     cs->halted);
    } else
#endif
    {
        qemu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                     "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                     "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                     (uint32_t)env->regs[R_EAX],
                     (uint32_t)env->regs[R_EBX],
                     (uint32_t)env->regs[R_ECX],
                     (uint32_t)env->regs[R_EDX],
                     (uint32_t)env->regs[R_ESI],
                     (uint32_t)env->regs[R_EDI],
                     (uint32_t)env->regs[R_EBP],
                     (uint32_t)env->regs[R_ESP],
                     (uint32_t)env->eip, eflags,
                     eflags & DF_MASK ? 'D' : '-',
                     eflags & CC_O ? 'O' : '-',
                     eflags & CC_S ? 'S' : '-',
                     eflags & CC_Z ? 'Z' : '-',
                     eflags & CC_A ? 'A' : '-',
                     eflags & CC_P ? 'P' : '-',
                     eflags & CC_C ? 'C' : '-',
                     env->hflags & HF_CPL_MASK,
                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                     (env->a20_mask >> 20) & 1,
                     (env->hflags >> HF_SMM_SHIFT) & 1,
                     cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, seg_name[i], &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        qemu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                     env->gdt.base, env->gdt.limit);
        qemu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                     env->idt.base, env->idt.limit);
        qemu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                     (uint32_t)env->cr[0],
                     env->cr[2],
                     env->cr[3],
                     (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            qemu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        qemu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                     env->dr[6], env->dr[7]);
    } else
#endif
    {
        qemu_fprintf(f, "GDT= %08x %08x\n",
                     (uint32_t)env->gdt.base, env->gdt.limit);
        qemu_fprintf(f, "IDT= %08x %08x\n",
                     (uint32_t)env->idt.base, env->idt.limit);
        qemu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                     (uint32_t)env->cr[0],
                     (uint32_t)env->cr[2],
                     (uint32_t)env->cr[3],
                     (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            qemu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        qemu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                     env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                         env->cc_src, env->cc_dst,
                         cc_op_name);
        } else
#endif
        {
            qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                         (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                         cc_op_name);
        }
    }
    qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        qemu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                     env->fpuc,
                     (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                     env->fpstt,
                     fptag,
                     env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            qemu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                         i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                qemu_fprintf(f, "\n");
            else
                qemu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            qemu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                         i,
                         env->xmm_regs[i].ZMM_L(3),
                         env->xmm_regs[i].ZMM_L(2),
                         env->xmm_regs[i].ZMM_L(1),
                         env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                qemu_fprintf(f, "\n");
            else
                qemu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        qemu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            qemu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                         i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        qemu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
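
/*
 * CR0 updates: flush the TLB when PG/WP/PE change, enter or leave long
 * mode when paging is toggled with EFER.LME set, and keep the PE, ADDSEG
 * and MP/EM/TS shadow bits in hflags in sync with the new CR0 value.
 */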
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute. */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if !defined(CONFIG_USER_ONLY)
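/*
 * Debug-only translation: walk the guest page tables by hand using
 * physical-memory reads and return the physical address for 'addr',
 * or -1 if there is no valid mapping.
 */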
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                              (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                          (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
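
/*
 * MCE injection: env->mce_banks holds four 64-bit words per bank
 * (CTL, STATUS, ADDR, MISC), which is how do_inject_x86_mce() indexes it.
 */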
typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cs);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cs->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc, false);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format. */
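/*
 * In the temporary format the arithmetic flags live in CC_SRC with
 * CC_OP_EFLAGS, and DF is kept as +1/-1 in env->df; entering the
 * execution loop splits env->eflags out, leaving it folds them back.
 */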

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
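/*
 * Physical memory accessors used by the softmmu helpers: the memory
 * transaction attributes derived from the current CPU state select the
 * address space to use for each access (e.g. SMRAM while in SMM).
 */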
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif