/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
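/* Decode family/model from the CPUID version dword (EAX of CPUID leaf 1):
   the base family lives in bits 11:8; the model is assembled from the
   extended-model field (bits 19:16) as its high nibble and the base model
   (bits 7:4) as its low nibble. */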
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
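/* a20_mask is ANDed into page-table and physical addresses during
   translation; with the A20 gate disabled, bit 20 is forced to zero so
   that addresses alias across the 1 MB boundary, as on the original PC. */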
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
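/* Note: HF_MP_MASK, HF_EM_MASK and HF_TS_MASK occupy consecutive hflags
   bit positions, mirroring CR0.MP/EM/TS (CR0 bits 1-3); the single shift
   by (HF_MP_SHIFT - 1) at the end of the function below lines all three
   up at once. */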
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
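/* Software page-table walker for the softmmu.  Three layouts are handled
   below: paging disabled (flat identity mapping), PAE/long-mode paging
   with 64-bit entries (PML4E -> PDPE -> PDE -> PTE), and legacy two-level
   32-bit paging.  is_write1 encodes the access type: 0 = read, 1 = write,
   2 = instruction fetch. */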
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
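    /* With CR4.PAE set, entries are 64 bits wide and may carry the NX bit
       (bit 63); in long mode the walk starts one level higher, at the
       PML4 entry selected by virtual address bits 47:39. */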
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
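        /* ptep accumulates the effective protection of the walk so far:
           each level is XORed with PG_NX_MASK so that "may execute"
           becomes an active-high bit that can simply be ANDed across
           levels, then flipped back before the permission checks. */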
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
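/* Debugger address translation: repeats the table walk without permission
   checks, without touching accessed/dirty bits and without faulting.
   Returns -1 if the page is not mapped. */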
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
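/* Hardware debug registers: DR7 holds a 2-bit type for each of the four
   breakpoints -- 0 = instruction, 1 = data write, 2 = I/O (not supported
   here), 3 = data read/write.  Instruction breakpoints are backed by TCG
   breakpoints, data breakpoints by watchpoints. */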
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
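/* Recompute DR6 after a debug event: the B0-B3 bits are set for every
   matching breakpoint or watchpoint, but a #DB is only signalled (return
   value 1) if the matching entry is also enabled in DR7. */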
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
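/* The breakpoint handler below is chained in front of whatever debug
   exception handler was registered before it (saved in
   prev_debug_excp_handler): hardware breakpoint and watchpoint hits are
   converted into #DB exceptions here, everything else is passed on. */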
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUState *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
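/* env->mce_banks stores four 64-bit registers per bank, in MSR order:
   MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC -- hence the banks[0..3]
   indexing below.  The injection itself runs on the target CPU's thread
   via run_on_cpu(). */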
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(cenv, do_inject_x86_mce, &params);
        }
    }
}

#endif /* !CONFIG_USER_ONLY */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
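/* Decode a raw GDT/LDT descriptor for debug accessors: the 32-bit base is
   scattered over both descriptor words (e1 bits 31:16, e2 bits 7:0 and
   31:24), and a set granularity (G) bit scales the 20-bit limit to 4 KB
   units. */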
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
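/* INIT is modelled as a full CPU reset that preserves the pending SIPI
   state and MSR_PAT; afterwards only the bootstrap processor keeps
   running, application processors stay halted until they receive a SIPI. */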
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif