/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

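/* Decode the CPU family and model from env->cpuid_version.  The
   extended model bits (EAX[19:16]) are folded into the high nibble of
   *model, which is what the MCA broadcast check below relies on. */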
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH (family 6, model 14)
   and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
 done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

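/* Dump the architectural CPU state to f.  "flags" selects the optional
   sections: CPU_DUMP_CCOP (lazy condition-code state), CPU_DUMP_FPU
   (x87 and SSE registers) and CPU_DUMP_CODE (code bytes around CS:EIP). */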
void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

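/* The A20 line, when masked, forces physical address bit 20 to zero
   for 8086 compatibility.  Toggling it changes every translation, so
   all cached TBs and TLB entries must be dropped. */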
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

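/* PHYS_ADDR_MASK selects the page-frame bits of a PTE: bits 12..39
   (40-bit physical addresses) on 64-bit targets, bits 12..35 (36 bits,
   the PAE minimum) on 32-bit ones. */
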
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
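/* Walk the guest page tables and, on success, install the resulting
   translation in the QEMU TLB via tlb_set_page().  Four layouts are
   handled: no paging (identity mapping), 4-level long mode, 3-level
   PAE and the legacy 2-level 32-bit format.  Accessed/dirty bits are
   updated the way a hardware walker would, and NX/WP/SMEP/SMAP are
   checked according to mmu_idx. */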
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

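/* Debug-only address translation: walk the page tables like the fault
   handler above, but without faulting and without touching the
   accessed/dirty bits.  Returns the physical address, or -1 if the
   virtual address is unmapped. */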
hwaddr cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

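/* Map one DR7 debug-register slot onto QEMU's breakpoint/watchpoint
   machinery: DR7_TYPE_BP_INST becomes a code breakpoint and the data
   types become watchpoints; I/O breakpoints are not supported. */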
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

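/* Recompute DR6 after a debug event.  Returns true if a breakpoint
   that is enabled in DR7 was hit; DR6 is written back only in that
   case or when force_dr6_update is set. */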
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

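/* Debug-exception hook for TCG: distinguishes watchpoint hits from
   instruction breakpoints and raises #DB only for architectural
   (BP_CPU) ones; guest-invisible breakpoints (e.g. from the gdbstub)
   are left to their owners. */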
void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

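/* Parameters of an MCE injection, marshalled through run_on_cpu() so
   that the bank update happens on the target VCPU's own thread. */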
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

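/* Monitor entry point for MCE injection: validate the request against
   the CPU's MCA capabilities, inject into the chosen bank and, with
   MCE_INJECT_BROADCAST, signal a dummy uncorrected error to every
   other VCPU as a broadcast-capable machine would. */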
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.cpu = x86_env_get_cpu(env);
            run_on_cpu(CPU(params.cpu), do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

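/* Read a segment descriptor on behalf of the debugger.  Returns 1 and
   fills in base/limit/flags on success, 0 if the selector is outside
   the GDT/LDT limit or the descriptor cannot be read. */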
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
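/* Handle an INIT IPI: reset the core but preserve a pending SIPI and
   the PAT MSR, then put the local APIC back into its wait-for-SIPI
   state. */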
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif