/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
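
/* Set the state of the A20 address-line gate.  With A20 disabled,
 * bit 20 of every physical address is forced to zero (8086 wrap-around
 * compatibility); env->a20_mask is ANDed into page-table and physical
 * addresses throughout the MMU code below, so toggling it invalidates
 * all cached translations. */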
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)
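
/* In user-mode emulation there are no guest page tables to walk:
 * every MMU fault is reported back as a guest page fault with the
 * user (U) bit set in the error code. */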
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
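/* Software page-table walk.  Three configurations are handled below:
 * paging disabled (identity mapping), PAE paging (including the
 * long-mode PML4E -> PDPE -> PDE -> PTE walk when LMA is set), and
 * legacy two-level 32-bit paging.  Access rights from all levels are
 * accumulated in ptep, accessed/dirty bits are written back as the
 * walk proceeds, and the final translation is installed with
 * tlb_set_page().  is_write1 == 2 denotes an instruction fetch. */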
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
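
/* Debug-only translation: walk the page tables like the fault handler
 * above, but with no permission checks, no accessed/dirty updates and
 * no fault generation.  Returns the physical address, or -1 if the
 * address is not mapped. */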
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
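
/* x86 debug registers: map the per-breakpoint type/length fields of
 * DR7 onto QEMU's breakpoint (DR7_TYPE_BP_INST) and watchpoint
 * (DR7_TYPE_DATA_WR/DATA_RW) machinery.  I/O breakpoints
 * (DR7_TYPE_IO_RW) are not supported. */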
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}
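
/* Machine-check injection, used by the monitor's mce command.  The
 * parameters are bundled in MCEInjectionParams so that
 * do_inject_x86_mce() can be run on the target VCPU's thread via
 * run_on_cpu(). */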
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(x86_env_get_cpu(env)), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
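
/* Read a segment descriptor from the GDT or LDT for the debugger.
 * On success, returns 1 and fills in base, limit and flags; returns 0
 * if the selector is outside the table limit or the descriptor cannot
 * be read. */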
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;

    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif