target-i386: preserve FPU and MSR state on INIT
[qemu/kevin.git] / target-i386 / helper.c
blob 46d20e4b8952fc6f8f42d6d11f37d4221eb7c29d
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
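/*
 * Aside (not in the original source): env->a20_mask is ANDed into every
 * physical address computed by the page walkers and translation helpers
 * below, so clearing bit 20 here emulates a gated A20 line, i.e. the
 * 8086-style wrap-around at the 1 MB boundary that real-mode software
 * may rely on.
 */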
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
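/*
 * Aside on the last statement of cpu_x86_update_cr0() above (a note
 * added here, not in the original source): the single shift works
 * because CR0.MP/EM/TS occupy consecutive bits 1-3 and the HF_MP/EM/TS
 * flags are laid out consecutively in hflags starting at HF_MP_SHIFT,
 * so shifting CR0 left by (HF_MP_SHIFT - 1) lines all three up at once.
 */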
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(CPU(cpu), 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
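/*
 * A sketch of the long-mode (PAE) walk implemented below, inferred from
 * the shifts and masks in the code (a note added here, not in the
 * original source): each level takes the physical-address bits of the
 * previous entry (PHYS_ADDR_MASK) and indexes a 512-entry table of
 * 8-byte entries with a 9-bit slice of the virtual address:
 *
 *   PML4E index: vaddr[47:39]
 *   PDPE  index: vaddr[38:30]
 *   PDE   index: vaddr[29:21]   (PG_PSE_MASK set -> 2 MB page, stop here)
 *   PTE   index: vaddr[20:12]   (4 KB page)
 *
 * Legacy PAE drops the PML4 level and indexes a 4-entry PDPT with
 * vaddr[31:30]; non-PAE mode uses two levels of 4-byte entries indexed
 * by vaddr[31:22] and vaddr[21:12].
 */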
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;

            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe & ~( (page_size - 1) & ~0xfff);
                pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
                goto out;
            }

        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
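/*
 * Debug-register helpers.  A sketch of how the DR7 R/Wn type field is
 * mapped onto QEMU's generic breakpoint machinery in the functions
 * below (a note added here, not in the original source; it is not an
 * exhaustive description of DR7):
 *
 *   DR7_TYPE_BP_INST -> cpu_breakpoint_insert()  with BP_CPU
 *   DR7_TYPE_DATA_WR -> cpu_watchpoint_insert()  with BP_CPU | BP_MEM_WRITE
 *   DR7_TYPE_DATA_RW -> cpu_watchpoint_insert()  with BP_CPU | BP_MEM_ACCESS
 *   DR7_TYPE_IO_RW   -> unsupported for now
 */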
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
void breakpoint_handler(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
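/*
 * Note on the bank layout assumed below (added here, not in the
 * original source): each MCE bank occupies four consecutive uint64_t
 * slots in cenv->mce_banks, mirroring the MSR_MCi_{CTL,STATUS,ADDR,MISC}
 * quartet, so banks[0] is MCi_CTL, banks[1] MCi_STATUS, banks[2]
 * MCi_ADDR and banks[3] MCi_MISC.
 */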
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
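/*
 * A note on the descriptor decoding done below (added here, not in the
 * original source): e1/e2 are the two 32-bit words of a GDT/LDT
 * descriptor.  The base is scattered across both words
 * (base[15:0] = e1[31:16], base[23:16] = e2[7:0], base[31:24] = e2[31:24])
 * and the 20-bit limit is scaled by 4K when the granularity bit
 * (DESC_G_MASK) is set.
 */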
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
#if !defined(CONFIG_USER_ONLY)
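/*
 * A note tying this to the commit this blob belongs to ("target-i386:
 * preserve FPU and MSR state on INIT"; added here, not in the original
 * source): do_cpu_init() implements INIT as a full cpu_reset() followed
 * by restoring the CPUX86State fields between start_init_save and
 * end_init_save from a saved copy, so state in that range -- which
 * includes the FPU and MSR state the commit preserves -- survives INIT.
 */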
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif