target-i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
void cpu_state_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
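    /* Architected reset state: the CS selector is 0xf000 but its cached
       base is 0xffff0000, so the first instruction fetch (CS:EIP =
       f000:fff0) hits physical address 0xfffffff0, the x86 reset vector
       just below the top of the 4 GB address space. */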
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
void cpu_x86_close(CPUX86State *env)
{
    g_free(env);
}
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
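    /* CPUID encodes the model in two fields: bits 19..16 hold the
       extended model and bits 7..4 the base model, so the effective
       model is (extended_model << 4) + model. */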
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
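        /* fptags[] stores 1 for an empty slot, hence the inversion above
           when building the abridged (FXSAVE-style) FTW byte.  FSW keeps
           the top-of-stack index in bits 13..11, so fpstt is spliced back
           into the status word printed below. */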
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
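        /* 64-bit mode exposes XMM0..XMM15; legacy modes only XMM0..XMM7. */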
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
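    /* CR0.MP/EM/TS are bits 1..3 and the HF_MP/HF_EM/HF_TS hflags are
       kept contiguous starting at HF_MP_SHIFT, so a single shift by
       (HF_MP_SHIFT - 1) copies all three bits at once. */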
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
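/* Software page-table walk for the softmmu TLB.  Depending on CR0.PG,
   CR4.PAE and EFER.LMA this walks zero (paging disabled), two (legacy
   32-bit), three (PAE) or four (long mode: PML4E -> PDPE -> PDE -> PTE)
   levels, updating accessed/dirty bits on the way, and installs the
   resulting translation with tlb_set_page(). */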
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
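    /* is_write1 is really the access type: 0 = data read, 1 = data write,
       2 = instruction fetch (the case the NX and PG_ERROR_I_D_MASK checks
       below test for); fold it to a plain write flag here. */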
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
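            /* Accumulate the effective permissions in ptep.  The NX bit
               is inverted (XOR PG_NX_MASK) so that "execute allowed"
               becomes a set bit; AND-ing the successive levels then
               keeps the most restrictive of the exec/user/rw rights. */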
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
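/* Walk the page tables without side effects: no accessed/dirty bit
   updates, no TLB fill and no fault injection.  Used for debug accesses
   (gdbstub, monitor); returns -1 if the address is not mapped. */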
target_phys_addr_t cpu_get_phys_page_debug(CPUX86State *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
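/* DR7 hardware-debug helpers: map the four architectural DR0-DR3 slots
   onto QEMU's generic breakpoint/watchpoint lists according to the
   type field encoded in DR7. */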
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
int check_hw_breakpoints(CPUX86State *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
static CPUDebugExcpHandler *prev_debug_excp_handler;

static void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUX86State *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
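    /* Each MCE bank occupies four consecutive slots in mce_banks:
       banks[0] = MCi_CTL, banks[1] = MCi_STATUS,
       banks[2] = MCi_ADDR, banks[3] = MCi_MISC. */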
    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUX86State *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(cenv, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    TranslationBlock *tb;

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(env, CPU_INTERRUPT_TPR);
    } else {
        tb = tb_find_pc(env->mem_io_pc);
        cpu_restore_state(tb, env, env->mem_io_pc);

        apic_handle_tpr_access_report(env->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;
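    /* Descriptor layout: e1 is the low dword of the descriptor, e2 the
       high dword.  The base is scattered over bytes 2..4 and 7, the limit
       over e1[15:0] and e2[19:16]; the G bit scales the limit to 4 KB
       granularity. */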
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = g_malloc0(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    env->cpuid_apic_id = env->cpu_index;
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUX86State *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_state_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUX86State *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUX86State *env)
{
}
void do_cpu_sipi(CPUX86State *env)
{
}
#endif