target-i386: do not pass MSR_TSC_AUX to KVM ioctls if CPUID bit is not set
[qemu/ar7.git] / target-i386 / helper.c
blob 575583942a1643714a9e72546f4369a50f721806
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
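
/* Decode the CPU family and model from CPUID.1:EAX, folding the extended
 * model bits (EAX[19:16]) into the high nibble of the reported model. */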
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}
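
/* Pretty-print one Local Vector Table register; timer entries additionally
 * report their periodic/tsc-deadline/one-shot mode. */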
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
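
/* Decode the timer Divide Configuration Register: bits 0, 1 and 3 select
 * a divider of 2^(value+1), with the all-ones encoding meaning divide by 1. */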
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}
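
/* Render the low 'size' bits of val as a NUL-terminated binary string,
 * most significant bit first. */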
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }

    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
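
/* Print every vector set in a 256-bit IRR/ISR bitmap, marking entries that
 * are level-triggered according to the TMR. */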
static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        /* cluster mode */
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}
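
/* Update CR4, flushing the TLB when paging-related bits change and
 * recomputing the hflags that cache the OSFXSR and SMAP state. */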
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;

        if (pkru_ad) {
            prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            prot &= ~PAGE_WRITE;
        }
        if ((prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
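
/* Walk the page tables without side effects (no accessed/dirty updates and
 * no faults raised) and return the physical address for 'addr', or -1 if it
 * is not mapped.  Used for debug accesses (monitor, gdbstub). */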
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
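
/* Runs on the target VCPU via run_on_cpu(): fill in the selected MCE bank
 * and, for uncorrected errors, raise CPU_INTERRUPT_MCE. */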
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
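
/* Monitor entry point for MCE injection: validate the arguments, inject on
 * the target VCPU, and with MCE_INJECT_BROADCAST replicate the machine
 * check to every other VCPU as MCA broadcast requires. */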
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
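
/* Record a guest access to the task priority register: with KVM, defer
 * reporting via a CPU_INTERRUPT_TPR interrupt; under TCG, restore the guest
 * state and report the access to the APIC synchronously. */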
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
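
/* Fetch a descriptor from the GDT/LDT for debugging purposes.  Returns 1
 * on success and 0 if the selector is outside the table limit or the
 * descriptor cannot be read. */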
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
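/* Handle an INIT signal: reset the CPU but restore the state between
 * start_init_save and end_init_save, which is meant to survive INIT. */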
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
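/* Physical memory accessors that honour the CPU's current memory
 * transaction attributes (e.g. so that SMM code targets the SMM
 * address space). */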
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif