/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
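
/* Extract the family and model from the CPUID version dword: the
 * family is taken from bits 11:8, and the model combines the extended
 * model field (bits 19:16) with the base model field (bits 7:4). */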
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY
/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}
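
/* Pretty-print one Local Vector Table entry: polarity, trigger mode,
 * mask and delivery status, the timer mode (LVTT only), the delivery
 * mode, and the vector (unless the delivery mode is NMI). */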
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}
/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
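
/* Decode the timer Divide Configuration Register: the divide value is
 * encoded in bits 0, 1 and 3; an encoding of 7 means divide-by-1,
 * every other value divides by 2^(value + 1). */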
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
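
/* Dump the Interrupt Command Register pair.  ICR2 holds the
 * destination, which is decoded according to the destination mode
 * (physical vs. logical), the shorthand field, and whether the CPU
 * advertises x2APIC support in CPUID. */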
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand =
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
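
/* Print every vector that is set in a 256-bit IRR/ISR bitmap; vectors
 * whose TMR bit is also set are marked as level-triggered. */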
static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}
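
/* Dump the full local APIC state of one CPU: LVT entries, timer, SPIV,
 * ICR, ESR, ISR/IRR and the priority registers.  This is what the
 * monitor's "info lapic" command is expected to print. */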
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
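
/* The CPU_DUMP_CODE output below shows a DUMP_CODE_BYTES_TOTAL byte
 * window around EIP, starting up to DUMP_CODE_BYTES_BACKWARD bytes
 * before the current instruction (less if EIP is too close to the
 * start of the code segment). */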

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else
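
/* Software TLB fill for the emulated MMU.  Depending on CR0.PG,
 * CR4.PAE and EFER.LMA this skips translation entirely, walks the
 * legacy two-level 32-bit tables (with optional 4MB PSE pages), or
 * walks the PAE/long-mode tables.  The effective NX/user/write
 * permissions are accumulated in ptep, accessed/dirty bits are set
 * as a hardware walker would set them, and the resulting translation
 * is installed with tlb_set_page_with_attrs(). */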
/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
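
/* Translate a guest virtual address for the debugger.  This repeats
 * the page-table walk above, but read-only: no accessed/dirty updates
 * and no fault injection.  Returns the physical address, or -1 if the
 * address is unmapped. */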
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
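
/* Runs on the target CPU (scheduled with run_on_cpu() below) and
 * applies the MCA injection rules: refuse injection when uncorrected
 * error reporting is disabled, turn a nested machine check into a
 * triple fault, and set the OVER bit when a valid status is being
 * overwritten. */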
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
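
/* Fetch and decode a segment descriptor from the GDT/LDT for the
 * debugger, using debug (non-faulting) memory accesses.  Returns 1 on
 * success and 0 if the selector is outside the table limit or the
 * descriptor cannot be read. */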
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
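/* Physical memory accessors used by the softmmu code above.  They go
 * through the CPU's address space with the memory attributes returned
 * by cpu_get_mem_attrs(), so accesses honour the current machine
 * state (e.g. SMM). */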
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif