target-i386/helper.c

/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
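
/*
 * Decode the family and model fields of CPUID leaf 1 EAX
 * (env->cpuid_version): the base family is bits 11:8, and the model is
 * the extended model (bits 19:16) as high nibble combined with the base
 * model (bits 7:4); the extended family field is ignored here.
 */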
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */
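
/* Condition code operation names, indexed by env->cc_op; the entries
   must stay in the same order as the CC_OP_* enumeration in cpu.h. */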
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};

static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK) >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
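
/* The CPU_DUMP_CODE output below shows DUMP_CODE_BYTES_TOTAL bytes of
   code around EIP, starting up to DUMP_CODE_BYTES_BACKWARD bytes before
   it; the byte at EIP is bracketed as <xx>. */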
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
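
/* env->a20_mask is ANDed into every physical address computed by the
   MMU code below; forcing bit 20 to zero emulates the legacy A20 gate
   wraparound. */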
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
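
/* PHYS_ADDR_MASK selects bits 39:12 of a PAE/long-mode page table entry
   (a 40-bit physical address space) on 64-bit targets, and bits 35:12
   (36 bits) otherwise. */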

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
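/* is_write1 is the access type: 0 = data read, 1 = data write,
   2 = instruction fetch; only its low bit is used as the write flag. */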
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
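
/* Debug translation used by the gdb stub and the monitor: walks the page
   tables read-only, without setting accessed/dirty bits or raising
   faults, and returns -1 if the address is not mapped. */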
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
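
/* DR7 encodes, for each of the four hardware debug registers, whether
   the breakpoint is enabled together with its type (instruction, data
   write, I/O, data read/write) and length; the helpers below map those
   encodings onto QEMU breakpoints and watchpoints. */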
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
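
/* Executed through run_on_cpu() so that it runs in the context of the
   target CPU and can safely update that CPU's MCE bank state. */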
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
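
/* Reconstruct a segment descriptor (base, limit, flags) from the GDT or
   LDT for the gdb stub; returns 1 on success, 0 if the selector is out
   of range or the descriptor cannot be read. */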
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif