[qemu/ar7.git] target-i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

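/*
 * Worked example (added for illustration, not in the original source):
 * a CPUID signature of 0x000306a9 decodes as family = (0x306a9 >> 8) & 0xf
 * = 6 and model = ((0x306a9 >> 12) & 0xf0) + ((0x306a9 >> 4) & 0xf)
 * = 0x30 + 0xa = 0x3a (58); the high nibble of the model comes from the
 * extended-model field.  Family 6 with model >= 14, so MCA broadcast is
 * reported as supported for this signature.
 */
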
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}

static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}

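/*
 * Worked example (added for illustration): the timer DCR keeps its divider
 * in bits 0, 1 and 3.  divider_conf(0x0) folds to divide_val 0 and returns
 * 2 (divide by 2); divider_conf(0xb) folds to divide_val 7, the special
 * case that returns 1 (divide by 1).
 */
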
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

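/*
 * Usage sketch (added for illustration): mask2str(buf, 0xa5, 8) emits the
 * most significant of the 8 requested bits first and yields "10100101".
 */
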
#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}

static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

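/*
 * Illustration (added): with a20_state == 0 the mask becomes
 * ~(1 << 20) = 0xffefffff, so bit 20 is cleared from every translated
 * address and accesses wrap at the 1MB boundary as on real hardware with
 * the A20 gate closed; with a20_state == 1 the mask is all ones and
 * addresses pass through unchanged.
 */
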
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

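/*
 * Note (added): the FPU-flag update above depends on HF_MP_MASK, HF_EM_MASK
 * and HF_TS_MASK occupying consecutive hflags bits in the same order as
 * CR0.MP (bit 1), CR0.EM (bit 2) and CR0.TS (bit 3), so a single shift by
 * HF_MP_SHIFT - 1 copies all three CR0 bits at once.
 */
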
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

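/*
 * Worked example for the PSE36 branch above (added for illustration):
 * a 4MB PDE of 0x1c0001e3 contributes (0x1c0000 << 19) = 0xe000000000,
 * i.e. bits 13-20 of the PDE become physical address bits 32-39, giving
 * the 40-bit frame address that PSE-36 defines; bit 21 stays reserved,
 * which is why rsvd_mask is set to 0x200000 on that path.
 */
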
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

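/*
 * Note (added): unlike x86_cpu_handle_mmu_fault(), this walker is
 * side-effect free: it never sets accessed/dirty bits and never raises a
 * fault, returning -1 instead.  It backs debug accesses such as
 * cpu_memory_rw_debug() from the gdb stub and the monitor.
 */
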
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

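/*
 * Worked example (added for illustration): a flat 4GB code segment has
 * descriptor words e1 = 0x0000ffff, e2 = 0x00cf9a00, which decode to
 * *base = 0 and *limit = 0xfffff; DESC_G_MASK is set, so the limit is
 * scaled to (0xfffff << 12) | 0xfff = 0xffffffff.
 */
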
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

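/*
 * Note (added): between these two hooks the flags live in the TCG lazy
 * format: CC_SRC holds the arithmetic flag bits, CC_OP records how to
 * recompute them, and DF is kept in env->df as +1/-1 so string
 * instructions can add it directly; cpu_compute_eflags() folds all of
 * this back into env->eflags.
 */
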
#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif