/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
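
/*
 * Example: a cpuid_version of 0x000306a9 decodes to family 6 and model
 * 0x3a, the extended model bits (EAX[19:16]) forming the high nibble of
 * the model.  The extended family field is not folded in here; it only
 * matters for family 0x0f parts.
 */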

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
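
/* The flags are evaluated lazily: cc_op records which operation last set
 * them and cc_src/cc_dst hold its operands, so EFLAGS is only
 * materialised when something actually reads it.  The strings above name
 * the pending cc_op in the register dump.
 */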

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) is only 3 bits wide.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}

static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * the destination shorthand is only 2 bits wide.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}

static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}
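
/*
 * Worked example: a divide configuration of 0xb has bits 3, 1 and 0 set,
 * so divide_val = 0x4 | 0x3 = 0x7 and the divider is 1; a value of 0x0
 * gives divide_val = 0 and a divider of 2 << 0 = 2, matching the local
 * APIC timer DCR encoding (111b = divide by 1, 000b = divide by 2).
 */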

static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}
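
/* e.g. mask2str(buf, 0xa5, 8) stores "10100101" in buf, most significant
 * bit first, NUL-terminated. */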

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16

static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand =
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}

static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
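
/*
 * With A20 masked off, bit 20 of every guest physical address is forced
 * to zero, so an access to 0x100000 wraps to 0x000000 just as on an 8086
 * with its 20 address lines; hence the full TLB flush above.
 */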

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
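
/*
 * CR0.MP, CR0.EM and CR0.TS live in bits 1-3 of CR0, so the single shift
 * by (HF_MP_SHIFT - 1) above lines all three up with the corresponding
 * HF_MP/HF_EM/HF_TS bits of hflags in one operation.
 */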

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }

    /* Clear bits we're going to recompute. */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32 bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
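            /* NX is xor-ed in rather than or-ed so that, during the
             * walk, a set PG_NX_MASK bit in ptep means "executable";
             * and-ing successive levels below then permits execution
             * only if every level allows it.  The bit is flipped back
             * once the walk is complete. */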
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
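            /* For instance, PDE bits 20-13 holding 0x01 contribute
             * 1ULL << 32 to the physical base, which is how PSE-36
             * reaches 40-bit physical addresses with 4MB pages. */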
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
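    /* The #PF error code assembled below follows the architectural
     * layout: bit 0 = page was present (protection fault), bit 1 =
     * write access, bit 2 = user-mode access, bit 3 = reserved bit set
     * in a paging entry, bit 4 = instruction fetch.
     */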
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
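
/* Each MCE bank is stored as four consecutive uint64_t values in
 * env->mce_banks, mirroring the MCi_CTL, MCi_STATUS, MCi_ADDR and
 * MCi_MISC MSRs; hence the "4 * params->bank" indexing below.
 */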

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format. */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}
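
/* env->df holds the direction as +1 or -1 rather than as a flag bit,
 * so the string-op helpers can step SI/DI by df * operand size;
 * EFLAGS.DF is bit 10, hence the extraction above.
 */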

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif