target/i386: move cpu_sync_bndcs_hflags() function
[qemu/ar7.git] / target / i386 / helper.c
blob4d584b59841ee159ed22106c9b66c686c2edf4d9
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "sysemu/kvm.h"
24 #include "kvm_i386.h"
25 #ifndef CONFIG_USER_ONLY
26 #include "sysemu/sysemu.h"
27 #include "sysemu/hw_accel.h"
28 #include "monitor/monitor.h"
29 #include "hw/i386/apic_internal.h"
30 #endif
32 void cpu_sync_bndcs_hflags(CPUX86State *env)
34 uint32_t hflags = env->hflags;
35 uint32_t hflags2 = env->hflags2;
36 uint32_t bndcsr;
38 if ((hflags & HF_CPL_MASK) == 3) {
39 bndcsr = env->bndcs_regs.cfgu;
40 } else {
41 bndcsr = env->msr_bndcfgs;
44 if ((env->cr[4] & CR4_OSXSAVE_MASK)
45 && (env->xcr0 & XSTATE_BNDCSR_MASK)
46 && (bndcsr & BNDCFG_ENABLE)) {
47 hflags |= HF_MPX_EN_MASK;
48 } else {
49 hflags &= ~HF_MPX_EN_MASK;
52 if (bndcsr & BNDCFG_BNDPRESERVE) {
53 hflags2 |= HF2_MPX_PR_MASK;
54 } else {
55 hflags2 &= ~HF2_MPX_PR_MASK;
58 env->hflags = hflags;
59 env->hflags2 = hflags2;
62 static void cpu_x86_version(CPUX86State *env, int *family, int *model)
64 int cpuver = env->cpuid_version;
66 if (family == NULL || model == NULL) {
67 return;
70 *family = (cpuver >> 8) & 0x0f;
71 *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
74 /* Broadcast MCA signal for processor version 06H_EH and above */
75 int cpu_x86_support_mca_broadcast(CPUX86State *env)
77 int family = 0;
78 int model = 0;
80 cpu_x86_version(env, &family, &model);
81 if ((family == 6 && model >= 14) || family > 6) {
82 return 1;
85 return 0;
88 /***********************************************************/
89 /* x86 debug */
/* Human-readable name for each CC_OP_* value, indexed by the CC_OP
 * enumeration; used when dumping CPU state with CPU_DUMP_CCOP.
 */
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
/* Print one cached segment descriptor (selector, base, limit, flags)
 * followed by a decoded view of the access rights.  In protected mode
 * a code/data segment is shown with its type letters; a system segment
 * is shown with its type name (32- vs 64-bit table chosen by LMA).
 */
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    /* No access-rights decoding in real mode or for non-present segments. */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            /* code segment: width, conforming and readable bits */
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            /* data segment: width, expand-down and writable bits */
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system segment/gate: type names differ between legacy and
         * long mode
         */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
214 #ifndef CONFIG_USER_ONLY
/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
/* Map a 3-bit APIC delivery-mode field to its printable name.
 * Unused/reserved encodings map to "...".
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *const names[8] = {
        "Fixed", "...", "SMI", "...", "NMI", "INIT", "...", "ExtINT"
    };

    return names[dm];
}
/* Print one Local Vector Table entry: polarity, trigger mode, mask and
 * delivery-status bits, the timer mode (timer entry only) and the
 * delivery mode.  The vector is omitted for NMI delivery, which does
 * not use it.
 */
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}
/* ARRAY_SIZE check is not required because
 * the destination shorthand has a size of 2 bits.
 */
/* Map a 2-bit ICR destination-shorthand field to its printable name. */
static inline const char *shorthand2str(uint32_t shorthand)
{
    /* static const: the table is built once, not re-initialised on every
     * call, matching dm2str() above.
     */
    static const char *const str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
/* Decode the APIC timer Divide Configuration Register value into the
 * actual divider: bits 0, 1 and 3 form a 3-bit encoding where 7 means
 * divide-by-1 and any other value v means divide-by-2^(v+1).
 */
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t v = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    if (v == 7) {
        return 1;
    }
    return 2 << v;
}
/* Render the low @size bits of @val into @str as a '0'/'1' string,
 * most-significant bit first, and NUL-terminate it.  @str must have
 * room for size + 1 characters.
 */
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    int bit;

    for (bit = size - 1; bit >= 0; bit--) {
        *str++ = ((val >> bit) & 1) ? '1' : '0';
    }
    *str = 0;
}
#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

/* Print the Interrupt Command Register pair (ICR/ICR2), decoding the
 * destination according to the shorthand, physical vs logical mode,
 * and the xAPIC vs x2APIC register layout.
 */
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    /* With a shorthand in use the destination field is ignored. */
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }

    /* In x2APIC mode ICR2 holds the full 32-bit destination. */
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    /* Logical destination: decode by the destination format (DFR). */
    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
/* Print the vectors set in a 256-bit interrupt register (ISR or IRR),
 * tagging each one that is level-triggered according to the TMR;
 * prints "(none)" when no bit is set.
 */
static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}
/* Dump this CPU's local APIC state (LVT entries, timer configuration,
 * SPIV, ICR, ESR, ISR/IRR and the priority/destination registers)
 * to @f.
 */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    if (!s) {
        cpu_fprintf(f, "local apic state not available\n");
        return;
    }
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    /* dest_mode == 0 is cluster mode; decode the logical destination. */
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
/* User-mode emulation has no local APIC to dump. */
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

/* Dump the full CPU state (general registers, segments, descriptor
 * tables, control/debug registers and, depending on @flags, the
 * condition-code state, FPU/SSE registers and the code bytes around
 * EIP) to @f.
 */
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit register dump */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        /* 32-bit register dump */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    /* segment registers, then the LDT and task register */
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* FTW: one bit per x87 register, set when the tag says "valid" */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers are visible in 64-bit code, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        /* dump code bytes around CS:EIP, marking the current byte */
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
605 /***********************************************************/
606 /* x86 mmu */
607 /* XXX: add PGE support */
609 void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
611 CPUX86State *env = &cpu->env;
613 a20_state = (a20_state != 0);
614 if (a20_state != ((env->a20_mask >> 20) & 1)) {
615 CPUState *cs = CPU(cpu);
617 qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
618 /* if the cpu is currently executing code, we must unlink it and
619 all the potentially executing TB */
620 cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);
622 /* when a20 is changed, all the MMU mappings are invalid, so
623 we must flush everything */
624 tlb_flush(cs);
625 env->a20_mask = ~(1 << 20) | (a20_state << 20);
/* Install @new_cr0, flushing the TLB when paging-related bits change
 * and keeping EFER.LMA and the hidden flags (PE/ADDSEG/MP/EM/TS, and
 * LMA/CS64 on long-mode transitions) in sync.
 */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
669 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
670 the PDPT */
671 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
673 X86CPU *cpu = x86_env_get_cpu(env);
675 env->cr[3] = new_cr3;
676 if (env->cr[0] & CR0_PG_MASK) {
677 qemu_log_mask(CPU_LOG_MMU,
678 "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
679 tlb_flush(CPU(cpu));
/* Install @new_cr4, flushing the TLB on paging-relevant bit changes,
 * masking off bits the CPU model does not support, and recomputing the
 * OSFXSR/SMAP hidden flags plus the MPX hflags.
 */
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(CPU(cpu));
    }

    /* Clear bits we're going to recompute. */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    /* MPX enablement depends on CR4.OSXSAVE, see cpu_sync_bndcs_hflags() */
    cpu_sync_bndcs_hflags(env);
}
725 #if defined(CONFIG_USER_ONLY)
727 int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
728 int is_write, int mmu_idx)
730 X86CPU *cpu = X86_CPU(cs);
731 CPUX86State *env = &cpu->env;
733 /* user mode only emulation */
734 is_write &= 1;
735 env->cr[2] = addr;
736 env->error_code = (is_write << PG_ERROR_W_BIT);
737 env->error_code |= PG_ERROR_U_MASK;
738 cs->exception_index = EXCP0E_PAGE;
739 env->exception_is_int = 0;
740 env->exception_next_eip = -1;
741 return 1;
744 #else
/* Walk the page tables for @addr and install a TLB entry, or raise a
 * fault.  is_write1 encodes the access type (0=read, 1=write, 2=exec).
 *
 * return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    /* Paging disabled: identity mapping, full permissions. */
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    /* NX is a reserved bit unless EFER.NXE is set. */
    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4- or 5-level long-mode walk (PML5 level only with LA57) */
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* legacy 32-bit PAE: 4-entry PDPT, no NX/user/rw bits there */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        /* legacy 2-level 32-bit walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page directory entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page can be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    /* protection keys only apply to user pages in long mode */
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
/* Translate a virtual address to a physical address for the debugger,
 * walking the page tables read-only (no accessed/dirty updates, no
 * permission checks).  Returns -1 when no valid translation exists.
 */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping */
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4- or 5-level long-mode walk (PML5 only with LA57) */
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            /* legacy 32-bit PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        /* legacy 2-level 32-bit walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page (PSE-36 high address bits folded in) */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
/* Parameters for one MCE injection, marshalled to the target vCPU
 * thread via run_on_cpu() (see do_inject_x86_mce()). */
typedef struct MCEInjectionParams {
    Monitor *mon;        /* monitor used for error reporting */
    int bank;            /* target MCE bank index */
    uint64_t status;     /* value stored into MCi_STATUS (banks[1]) */
    uint64_t mcg_status; /* value stored into env->mcg_status */
    uint64_t addr;       /* value stored into MCi_ADDR (banks[2]) */
    uint64_t misc;       /* value stored into MCi_MISC (banks[3]) */
    int flags;           /* MCE_INJECT_* flags */
} MCEInjectionParams;
/*
 * run_on_cpu() worker that performs the actual MCE injection on the
 * target vCPU.  data.host_ptr carries a MCEInjectionParams describing
 * the bank and the MCi_*/MCG_* register values to install.
 */
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    /* Each bank occupies 4 consecutive MSR slots: CTL, STATUS, ADDR, MISC. */
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    /* Make sure env reflects the current (e.g. KVM-side) register state. */
    cpu_synchronize_state(cs);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        /* A second MCE while one is in progress is a triple fault. */
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cs->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        /* A valid entry already in the bank: record the overflow. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* Corrected error: only log it, no interrupt is raised. */
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        /* Bank holds an uncorrected valid entry: just mark the overflow. */
        banks[1] |= MCI_STATUS_OVER;
    }
}
1273 void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
1274 uint64_t status, uint64_t mcg_status, uint64_t addr,
1275 uint64_t misc, int flags)
1277 CPUState *cs = CPU(cpu);
1278 CPUX86State *cenv = &cpu->env;
1279 MCEInjectionParams params = {
1280 .mon = mon,
1281 .bank = bank,
1282 .status = status,
1283 .mcg_status = mcg_status,
1284 .addr = addr,
1285 .misc = misc,
1286 .flags = flags,
1288 unsigned bank_num = cenv->mcg_cap & 0xff;
1290 if (!cenv->mcg_cap) {
1291 monitor_printf(mon, "MCE injection not supported\n");
1292 return;
1294 if (bank >= bank_num) {
1295 monitor_printf(mon, "Invalid MCE bank number\n");
1296 return;
1298 if (!(status & MCI_STATUS_VAL)) {
1299 monitor_printf(mon, "Invalid MCE status code\n");
1300 return;
1302 if ((flags & MCE_INJECT_BROADCAST)
1303 && !cpu_x86_support_mca_broadcast(cenv)) {
1304 monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
1305 return;
1308 run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
1309 if (flags & MCE_INJECT_BROADCAST) {
1310 CPUState *other_cs;
1312 params.bank = 1;
1313 params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
1314 params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
1315 params.addr = 0;
1316 params.misc = 0;
1317 CPU_FOREACH(other_cs) {
1318 if (other_cs == cs) {
1319 continue;
1321 run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
1326 void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
1328 X86CPU *cpu = x86_env_get_cpu(env);
1329 CPUState *cs = CPU(cpu);
1331 if (kvm_enabled()) {
1332 env->tpr_access_type = access;
1334 cpu_interrupt(cs, CPU_INTERRUPT_TPR);
1335 } else {
1336 cpu_restore_state(cs, cs->mem_io_pc);
1338 apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
1341 #endif /* !CONFIG_USER_ONLY */
1343 int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
1344 target_ulong *base, unsigned int *limit,
1345 unsigned int *flags)
1347 X86CPU *cpu = x86_env_get_cpu(env);
1348 CPUState *cs = CPU(cpu);
1349 SegmentCache *dt;
1350 target_ulong ptr;
1351 uint32_t e1, e2;
1352 int index;
1354 if (selector & 0x4)
1355 dt = &env->ldt;
1356 else
1357 dt = &env->gdt;
1358 index = selector & ~7;
1359 ptr = dt->base + index;
1360 if ((index + 7) > dt->limit
1361 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1362 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1363 return 0;
1365 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1366 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1367 if (e2 & DESC_G_MASK)
1368 *limit = (*limit << 12) | 0xfff;
1369 *flags = e2;
1371 return 1;
1374 #if !defined(CONFIG_USER_ONLY)
/*
 * Handle an INIT IPI: reset the vCPU, but preserve the
 * start_init_save..end_init_save window of CPUX86State, which must
 * survive an INIT (it is restored from a heap snapshot after
 * cpu_reset()).
 */
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    /* Snapshot the full state; only the init_save window is copied back. */
    *save = *env;

    cpu_reset(cs);
    /* A pending SIPI must not be lost across the reset. */
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}
/* Handle a Startup IPI by forwarding it to the local APIC. */
void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
/* User-mode emulation: INIT and SIPI are full-system events, so these
 * are no-ops. */
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif
1410 /* Frob eflags into and out of the CPU temporary format. */
1412 void x86_cpu_exec_enter(CPUState *cs)
1414 X86CPU *cpu = X86_CPU(cs);
1415 CPUX86State *env = &cpu->env;
1417 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1418 env->df = 1 - (2 * ((env->eflags >> 10) & 1));
1419 CC_OP = CC_OP_EFLAGS;
1420 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
/* Fold the lazy condition-code state back into the architectural
 * EFLAGS image when leaving the execution loop. */
void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}
1431 #ifndef CONFIG_USER_ONLY
1432 uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
1434 X86CPU *cpu = X86_CPU(cs);
1435 CPUX86State *env = &cpu->env;
1436 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1437 AddressSpace *as = cpu_addressspace(cs, attrs);
1439 return address_space_ldub(as, addr, attrs, NULL);
1442 uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
1444 X86CPU *cpu = X86_CPU(cs);
1445 CPUX86State *env = &cpu->env;
1446 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1447 AddressSpace *as = cpu_addressspace(cs, attrs);
1449 return address_space_lduw(as, addr, attrs, NULL);
1452 uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
1454 X86CPU *cpu = X86_CPU(cs);
1455 CPUX86State *env = &cpu->env;
1456 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1457 AddressSpace *as = cpu_addressspace(cs, attrs);
1459 return address_space_ldl(as, addr, attrs, NULL);
1462 uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
1464 X86CPU *cpu = X86_CPU(cs);
1465 CPUX86State *env = &cpu->env;
1466 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1467 AddressSpace *as = cpu_addressspace(cs, attrs);
1469 return address_space_ldq(as, addr, attrs, NULL);
1472 void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
1474 X86CPU *cpu = X86_CPU(cs);
1475 CPUX86State *env = &cpu->env;
1476 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1477 AddressSpace *as = cpu_addressspace(cs, attrs);
1479 address_space_stb(as, addr, val, attrs, NULL);
1482 void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
1484 X86CPU *cpu = X86_CPU(cs);
1485 CPUX86State *env = &cpu->env;
1486 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1487 AddressSpace *as = cpu_addressspace(cs, attrs);
1489 address_space_stl_notdirty(as, addr, val, attrs, NULL);
1492 void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
1494 X86CPU *cpu = X86_CPU(cs);
1495 CPUX86State *env = &cpu->env;
1496 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1497 AddressSpace *as = cpu_addressspace(cs, attrs);
1499 address_space_stw(as, addr, val, attrs, NULL);
1502 void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
1504 X86CPU *cpu = X86_CPU(cs);
1505 CPUX86State *env = &cpu->env;
1506 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1507 AddressSpace *as = cpu_addressspace(cs, attrs);
1509 address_space_stl(as, addr, val, attrs, NULL);
1512 void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
1514 X86CPU *cpu = X86_CPU(cs);
1515 CPUX86State *env = &cpu->env;
1516 MemTxAttrs attrs = cpu_get_mem_attrs(env);
1517 AddressSpace *as = cpu_addressspace(cs, attrs);
1519 address_space_stq(as, addr, val, attrs, NULL);
1521 #endif