/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
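
/* env->cpuid_version follows the CPUID.01H:EAX layout: stepping in bits
   3:0, model in bits 7:4, family in bits 11:8, extended model in bits
   19:16.  cpu_x86_version() folds the extended model into the high nibble
   of *model. */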
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */
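
/* Condition-code operation names, indexed by the CC_OP_* enum; the B/W/L/Q
   suffix gives the operand width of the flag-producing operation. */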
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
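        /* env->fptags[i] is 1 when ST(i) is empty; the abridged FTW byte
           (FXSAVE convention) uses 1 for valid, hence the inversion below. */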
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
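    /* CR0.MP, CR0.EM and CR0.TS occupy bits 1-3, so one shift moves all
       three into the adjacent HF_MP/HF_EM/HF_TS hflags positions. */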
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
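
/* Note: these masks hard-code a 40-bit (36-bit without TARGET_X86_64)
   physical address space, keeping bits 39:12 (resp. 35:12) of an entry. */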

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
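            /* ptep accumulates the effective permissions of the walk.
               XOR-ing each entry with PG_NX_MASK inverts NX into an
               "exec allowed" bit, so a plain AND combines the RW, U/S and
               NX restrictions of all levels; it is flipped back before use. */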
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
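            /* flip NX back: from here on a set PG_NX_MASK bit in ptep
               again means "not executable" */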
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
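    /* is_write1 == 2 denotes an instruction fetch; the I/D error-code bit
       is only reported when NX (NXE with PAE) or SMEP is enabled. */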
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
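
/* DR7 configures up to four hardware debug slots (DR0-DR3): instruction
   breakpoints map to QEMU breakpoints, data breakpoints to watchpoints.
   BP_CPU tags them as guest-initiated. */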
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;
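
    /* DR6 bits 0-3 (B0-B3) latch which debug slots matched; recompute
       them from scratch on every check. */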
    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;
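
    /* BP_CPU marks breakpoints/watchpoints programmed through the guest's
       debug registers; those raise #DB in the guest, while gdbstub ones
       are left for the debugger to consume. */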
    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
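    /* each MCE bank occupies four consecutive MSR slots in mce_banks:
       MCi_CTL, MCi_STATUS, MCi_ADDR and MCi_MISC */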
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
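
    /* On broadcast, every other vCPU receives a dummy uncorrected-error
       record in bank 1 with RIPV set, mimicking hardware MCA broadcast. */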
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.cpu = x86_env_get_cpu(env);
            run_on_cpu(CPU(params.cpu), do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;
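
    /* Decode the two descriptor words: the base is scattered over
       e1[31:16], e2[7:0] and e2[31:24]; the 20-bit limit is scaled to
       4K granularity when the G bit is set. */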
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(CPU(cpu));
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}

void do_cpu_sipi(X86CPU *cpu)
{
}
#endif