qemu/ar7.git: target/i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "monitor/monitor.h"
#include "hw/i386/apic_internal.h"
#endif
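
/* Recompute the MPX-related HF_MPX_EN and HF2_MPX_PR hflags bits from the
 * active BNDCFG register (BNDCFGU in user mode, BNDCFGS otherwise), CR4 and
 * XCR0.
 */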
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}
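
/* Decode family and model from the CPUID version dword (leaf 1, EAX). */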
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
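
/* Print one segment register and its cached descriptor state. */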
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#ifndef CONFIG_USER_ONLY

/* ARRAY_SIZE check is not required because
 * DeliveryMode(dm) has a size of 3 bits.
 */
static inline const char *dm2str(uint32_t dm)
{
    static const char *str[] = {
        "Fixed",
        "...",
        "SMI",
        "...",
        "NMI",
        "INIT",
        "...",
        "ExtINT"
    };
    return str[dm];
}
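
/* Print one Local Vector Table (LVT) entry of the local APIC. */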
static void dump_apic_lvt(FILE *f, fprintf_function cpu_fprintf,
                          const char *name, uint32_t lvt, bool is_timer)
{
    uint32_t dm = (lvt & APIC_LVT_DELIV_MOD) >> APIC_LVT_DELIV_MOD_SHIFT;
    cpu_fprintf(f,
                "%s\t 0x%08x %s %-5s %-6s %-7s %-12s %-6s",
                name, lvt,
                lvt & APIC_LVT_INT_POLARITY ? "active-lo" : "active-hi",
                lvt & APIC_LVT_LEVEL_TRIGGER ? "level" : "edge",
                lvt & APIC_LVT_MASKED ? "masked" : "",
                lvt & APIC_LVT_DELIV_STS ? "pending" : "",
                !is_timer ?
                    "" : lvt & APIC_LVT_TIMER_PERIODIC ?
                        "periodic" : lvt & APIC_LVT_TIMER_TSCDEADLINE ?
                            "tsc-deadline" : "one-shot",
                dm2str(dm));
    if (dm != APIC_DM_NMI) {
        cpu_fprintf(f, " (vec %u)\n", lvt & APIC_VECTOR_MASK);
    } else {
        cpu_fprintf(f, "\n");
    }
}

/* ARRAY_SIZE check is not required because
 * destination shorthand has a size of 2 bits.
 */
static inline const char *shorthand2str(uint32_t shorthand)
{
    const char *str[] = {
        "no-shorthand", "self", "all-self", "all"
    };
    return str[shorthand];
}
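
/* Convert the timer Divide Configuration Register encoding into the actual
 * divisor: bits 3, 1 and 0 form divide_val, e.g. 0b1011 -> 7 -> divide by 1,
 * 0b0000 -> 0 -> divide by 2.
 */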
static inline uint8_t divider_conf(uint32_t divide_conf)
{
    uint8_t divide_val = ((divide_conf & 0x8) >> 1) | (divide_conf & 0x3);

    return divide_val == 7 ? 1 : 2 << divide_val;
}

static inline void mask2str(char *str, uint32_t val, uint8_t size)
{
    while (size--) {
        *str++ = (val >> size) & 1 ? '1' : '0';
    }
    *str = 0;
}

#define MAX_LOGICAL_APIC_ID_MASK_SIZE 16
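
/* Print ICR and ICR2, decoding the destination according to the shorthand,
 * the destination mode and the (x2)APIC logical destination format.
 */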
static void dump_apic_icr(FILE *f, fprintf_function cpu_fprintf,
                          APICCommonState *s, CPUX86State *env)
{
    uint32_t icr = s->icr[0], icr2 = s->icr[1];
    uint8_t dest_shorthand = \
        (icr & APIC_ICR_DEST_SHORT) >> APIC_ICR_DEST_SHORT_SHIFT;
    bool logical_mod = icr & APIC_ICR_DEST_MOD;
    char apic_id_str[MAX_LOGICAL_APIC_ID_MASK_SIZE + 1];
    uint32_t dest_field;
    bool x2apic;

    cpu_fprintf(f, "ICR\t 0x%08x %s %s %s %s\n",
                icr,
                logical_mod ? "logical" : "physical",
                icr & APIC_ICR_TRIGGER_MOD ? "level" : "edge",
                icr & APIC_ICR_LEVEL ? "assert" : "de-assert",
                shorthand2str(dest_shorthand));

    cpu_fprintf(f, "ICR2\t 0x%08x", icr2);
    if (dest_shorthand != 0) {
        cpu_fprintf(f, "\n");
        return;
    }
    x2apic = env->features[FEAT_1_ECX] & CPUID_EXT_X2APIC;
    dest_field = x2apic ? icr2 : icr2 >> APIC_ICR_DEST_SHIFT;

    if (!logical_mod) {
        if (x2apic) {
            cpu_fprintf(f, " cpu %u (X2APIC ID)\n", dest_field);
        } else {
            cpu_fprintf(f, " cpu %u (APIC ID)\n",
                        dest_field & APIC_LOGDEST_XAPIC_ID);
        }
        return;
    }

    if (s->dest_mode == 0xf) { /* flat mode */
        mask2str(apic_id_str, icr2 >> APIC_ICR_DEST_SHIFT, 8);
        cpu_fprintf(f, " mask %s (APIC ID)\n", apic_id_str);
    } else if (s->dest_mode == 0) { /* cluster mode */
        if (x2apic) {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_X2APIC_ID, 16);
            cpu_fprintf(f, " cluster %u mask %s (X2APIC ID)\n",
                        dest_field >> APIC_LOGDEST_X2APIC_SHIFT, apic_id_str);
        } else {
            mask2str(apic_id_str, dest_field & APIC_LOGDEST_XAPIC_ID, 4);
            cpu_fprintf(f, " cluster %u mask %s (APIC ID)\n",
                        dest_field >> APIC_LOGDEST_XAPIC_SHIFT, apic_id_str);
        }
    }
}
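
/* Print the vector numbers set in a 256-bit interrupt register (IRR/ISR);
 * entries also set in the TMR are marked as level-triggered.
 */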
static void dump_apic_interrupt(FILE *f, fprintf_function cpu_fprintf,
                                const char *name, uint32_t *ireg_tab,
                                uint32_t *tmr_tab)
{
    int i, empty = true;

    cpu_fprintf(f, "%s\t ", name);
    for (i = 0; i < 256; i++) {
        if (apic_get_bit(ireg_tab, i)) {
            cpu_fprintf(f, "%u%s ", i,
                        apic_get_bit(tmr_tab, i) ? "(level)" : "");
            empty = false;
        }
    }
    cpu_fprintf(f, "%s\n", empty ? "(none)" : "");
}
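
/* Dump the state of the CPU's local APIC (used by the monitor's
 * "info lapic" command).
 */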
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    APICCommonState *s = APIC_COMMON(cpu->apic_state);
    if (!s) {
        cpu_fprintf(f, "local apic state not available\n");
        return;
    }
    uint32_t *lvt = s->lvt;

    cpu_fprintf(f, "dumping local APIC state for CPU %-2u\n\n",
                CPU(cpu)->cpu_index);
    dump_apic_lvt(f, cpu_fprintf, "LVT0", lvt[APIC_LVT_LINT0], false);
    dump_apic_lvt(f, cpu_fprintf, "LVT1", lvt[APIC_LVT_LINT1], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTPC", lvt[APIC_LVT_PERFORM], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTERR", lvt[APIC_LVT_ERROR], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTTHMR", lvt[APIC_LVT_THERMAL], false);
    dump_apic_lvt(f, cpu_fprintf, "LVTT", lvt[APIC_LVT_TIMER], true);

    cpu_fprintf(f, "Timer\t DCR=0x%x (divide by %u) initial_count = %u\n",
                s->divide_conf & APIC_DCR_MASK,
                divider_conf(s->divide_conf),
                s->initial_count);

    cpu_fprintf(f, "SPIV\t 0x%08x APIC %s, focus=%s, spurious vec %u\n",
                s->spurious_vec,
                s->spurious_vec & APIC_SPURIO_ENABLED ? "enabled" : "disabled",
                s->spurious_vec & APIC_SPURIO_FOCUS ? "on" : "off",
                s->spurious_vec & APIC_VECTOR_MASK);

    dump_apic_icr(f, cpu_fprintf, s, &cpu->env);

    cpu_fprintf(f, "ESR\t 0x%08x\n", s->esr);

    dump_apic_interrupt(f, cpu_fprintf, "ISR", s->isr, s->tmr);
    dump_apic_interrupt(f, cpu_fprintf, "IRR", s->irr, s->tmr);

    cpu_fprintf(f, "\nAPR 0x%02x TPR 0x%02x DFR 0x%02x LDR 0x%02x",
                s->arb_id, s->tpr, s->dest_mode, s->log_dest);
    if (s->dest_mode == 0) {
        cpu_fprintf(f, "(cluster %u: id %u)",
                    s->log_dest >> APIC_LOGDEST_XAPIC_SHIFT,
                    s->log_dest & APIC_LOGDEST_XAPIC_ID);
    }
    cpu_fprintf(f, " PPR 0x%02x\n", apic_get_ppr(s));
}
#else
void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags)
{
}
#endif /* !CONFIG_USER_ONLY */

#define DUMP_CODE_BYTES_TOTAL 50
#define DUMP_CODE_BYTES_BACKWARD 20
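
/* Dump the CPU register state; flags select the optional condition-code,
 * FPU/SSE and code-bytes sections.
 */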
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].ZMM_L(3),
                        env->xmm_regs[i].ZMM_L(2),
                        env->xmm_regs[i].ZMM_L(1),
                        env->xmm_regs[i].ZMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
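
/* Install a new CR0 value: flush the TLB when paging-related bits change,
 * handle long-mode entry/exit and recompute the PE/ADDSEG/MP/EM/TS hflags.
 */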
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu));
    }
}
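
/* Install a new CR4 value: flush the TLB when paging-related bits change,
 * mask off bits the guest CPU does not support, and recompute the
 * OSFXSR/SMAP/MPX hflags.
 */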
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(CPU(cpu));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }

    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if !defined(CONFIG_USER_ONLY)
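
/* Debug helper: walk the guest page tables by hand to translate a virtual
 * address into a physical address; returns -1 if the address is not mapped.
 */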
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                    (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
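
/* run_on_cpu() callback that performs the actual MCE injection on the
 * target vCPU.
 */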
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cs);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cs->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
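
/* Entry point used by the monitor to inject an MCE: validate the request,
 * inject on the target vCPU and optionally broadcast to all other vCPUs.
 */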
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled() || whpx_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
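
/* Debug helper: fetch the base, limit and flags of the descriptor selected
 * by 'selector' from the GDT or LDT; returns 0 on failure, 1 on success.
 */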
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

/* Frob eflags into and out of the CPU temporary format.  */

void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}

void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}

#ifndef CONFIG_USER_ONLY
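
/* Physical memory accessors that use the CPU's current memory transaction
 * attributes and the matching address space (so e.g. accesses made while in
 * SMM go to the SMM address space).
 */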
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif