/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
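
/*
 * Worked example: for cpuid_version 0x000006e0, cpu_x86_version() above
 * yields family = (0x6e0 >> 8) & 0x0f = 6 and
 * model  = ((0x6e0 >> 12) & 0xf0) + ((0x6e0 >> 4) & 0x0f) = 0 + 14 = 14,
 * i.e. signature 06_0EH, the first version for which the check above
 * reports MCA broadcast support.
 */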
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
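
/*
 * The CPU_DUMP_CODE output below shows a DUMP_CODE_BYTES_TOTAL byte
 * window around EIP: up to DUMP_CODE_BYTES_BACKWARD bytes before it
 * (fewer when EIP is close to the start of the segment) and the rest
 * after it, with the byte at EIP bracketed as <xx>.
 */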
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }
    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
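
/*
 * Every physical address computed by the page-walk code below is masked
 * with env->a20_mask, so with A20 disabled bit 20 is forced to zero and
 * 0x100000 aliases 0x000000, matching the wraparound of the gated A20
 * line on real hardware.
 */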
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
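
/*
 * The single shift in the FPU-flags update relies on CR0.MP/EM/TS
 * occupying three consecutive bits (1..3) and HF_MP/HF_EM/HF_TS
 * occupying three consecutive hflags bits starting at HF_MP_SHIFT
 * (see cpu.h), so one shift copies all three flags at once.
 */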
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
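
/*
 * 0xfffffff000LL keeps bits 39..12, i.e. a 40-bit physical address
 * space for x86_64 builds; 0xffffff000LL keeps bits 35..12, the 36-bit
 * space of classic PAE on 32-bit-only builds. Bits 11..0 are the page
 * offset and never come from a page-table entry.
 */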
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
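
/*
 * Linear address layout walked below in long-mode/PAE paging (sketch):
 *   bits 47..39  PML4 index   ((addr >> 39) & 0x1ff)
 *   bits 38..30  PDPT index   ((addr >> 30) & 0x1ff)
 *   bits 29..21  PD index     ((addr >> 21) & 0x1ff)
 *   bits 20..12  PT index     ((addr >> 12) & 0x1ff)
 *   bits 11..0   page offset
 * Legacy 32-bit paging instead uses a 10/10/12 split, visible in the
 * ((addr >> 20) & 0xffc) and ((addr >> 10) & 0xffc) computations below
 * (the index is pre-multiplied by the 4-byte entry size).
 */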
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;
    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
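
/*
 * Page-fault error code layout produced above: bit 0 P (fault on a
 * present page, PG_ERROR_P_MASK), bit 1 W/R (write access), bit 2 U/S
 * (user-mode access), bit 3 RSVD (reserved bit set) and bit 4 I/D
 * (instruction fetch), the last reported only when NX or SMEP makes
 * fetches distinguishable from reads.
 */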
/* Side-effect-free page walk used by the debugger/monitor paths: it
   never sets accessed/dirty bits, skips protection checks and simply
   returns -1 when the address does not translate. */
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
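
/*
 * DR6 bits 0..3 (B0..B3) record which debug register matched; the loop
 * above computes them even for breakpoints whose enable bits in DR7 are
 * clear, but only an enabled match (or force_dr6_update) actually
 * writes dr6 back into env->dr[6].
 */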
void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
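
/*
 * The parameters are bundled into a struct so the injection can be
 * scheduled with run_on_cpu() and executed on the target vCPU's own
 * thread; see cpu_x86_inject_mce() below.
 */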
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
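
/*
 * Descriptor decoding above: the 32-bit base is scattered across the
 * two descriptor words as e1[31:16] | e2[7:0] << 16 | e2[31:24], the
 * 20-bit limit as e1[15:0] | e2[19:16], and the G bit rescales the
 * limit from bytes to 4KB pages ((limit << 12) | 0xfff).
 */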
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}

void do_cpu_sipi(X86CPU *cpu)
{
}
#endif