target-i386/helper.c

/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif

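/*
 * Decode family/model from the CPUID version dword (CPUID.1:EAX):
 * bits 8-11 hold the base family, bits 4-7 the base model and
 * bits 16-19 the extended model, which supplies the high nibble of
 * the model number computed below.  (The extended-family field is
 * not consulted here.)
 */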
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

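/*
 * Printable names for the lazy condition-code operations, indexed by
 * the CC_OP_* enumeration; the table therefore has exactly CC_OP_NB
 * entries and must stay in sync with the enum in cpu.h.
 */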
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}

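/*
 * The timer divide configuration register encodes the divide value in
 * bits 0, 1 and 3; divider_conf() folds bit 3 down next to bits 0-1
 * and maps the resulting 3-bit value to the divisor: 0 -> 2, 1 -> 4,
 * ... 6 -> 128, and the all-ones pattern 7 -> divide by 1 (so e.g. a
 * DCR of 0xb yields divide-by-1).
 */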
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}

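/*
 * Render the low 'size' bits of 'val' as a '0'/'1' string, most
 * significant bit first.  The caller must supply a buffer of at least
 * size + 1 bytes for the terminating NUL.
 */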
static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

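/*
 * Decode the interrupt command register pair.  A physical destination
 * in ICR2 is an APIC ID; a logical destination depends on the
 * destination format: flat mode (DFR 0xf) uses an 8-bit mask, cluster
 * mode (DFR 0x0) a cluster number plus member mask (4+4 bits for
 * xAPIC, 16+16 bits for x2APIC).
 */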
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand =
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }

    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}

static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}

#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

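/*
 * x86_cpu_dump_state() can append a hex dump of the code around EIP:
 * DUMP_CODE_BYTES_TOTAL bytes in all, starting at most
 * DUMP_CODE_BYTES_BACKWARD bytes before the current instruction, whose
 * first byte is marked with <..> in the output.
 */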
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

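/*
 * env->a20_mask is ANDed into every physical address computed by the
 * MMU code below, so flipping the A20 gate must both unlink any TBs
 * the CPU may be executing and flush the whole TLB: every cached
 * translation potentially changes.
 */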
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

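/*
 * CR0 writes can toggle paging, write protection and long-mode
 * activation, all of which affect cached translations, so the TLB is
 * flushed whenever PG/WP/PE change.  The final hflags update relies on
 * CR0.MP/EM/TS occupying bits 1-3: shifting the new value left by
 * HF_MP_SHIFT - 1 lines those three bits up with HF_MP/HF_EM/HF_TS.
 */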
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
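/*
 * is_write1 carries the access type straight from the softmmu slow
 * path: 0 for reads, 1 for writes, 2 for instruction fetches.  Only
 * the low bit matters for dirty tracking, which is why it is reduced
 * to is_write below, while the value 2 is still needed both to check
 * PAGE_EXEC via (1 << is_write1) and to decide whether to set the
 * instruction-fetch bit in the page-fault error code.
 */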
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

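/*
 * Debug variant of the table walk used by the gdbstub and the monitor:
 * it translates a virtual address without setting accessed/dirty bits
 * and without raising faults, returning -1 for unmapped addresses.
 */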
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

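/*
 * Worker that performs the actual MCE injection.  It is scheduled on
 * the target vCPU with run_on_cpu(), so it executes in that vCPU's
 * context with the parameter block passed through the opaque pointer.
 */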
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */
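/*
 * While translated code runs, the arithmetic flags live in CC_SRC with
 * CC_OP set to CC_OP_EFLAGS, and the direction flag is kept as +1/-1
 * in env->df (bit 10 of EFLAGS mapped via 1 - 2*DF below), so those
 * bits are stripped from env->eflags on entry and folded back by
 * cpu_compute_eflags() on exit.
 */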
void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
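/*
 * Physical-memory accessors for the table walks above.  Unlike the
 * generic ldl_phys()-style helpers they go through this CPU's own
 * address space with the CPU's current memory attributes from
 * cpu_get_mem_attrs() (e.g. reflecting SMM state), and pass NULL for
 * the MemTxResult, ignoring transaction failures.
 */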
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif