/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_i386.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif
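
/*
 * Note: cpuid_version below follows the CPUID leaf 1 EAX layout:
 * bits 3:0 stepping, 7:4 model, 11:8 family, 19:16 extended model
 * (the extended family field in bits 27:20 is not consulted here).
 * For example, 0x000206A7 decodes to family 6, model 0x2A, stepping 7.
 */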
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
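
/*
 * The table above names QEMU's lazy condition-code states: instead of
 * computing EFLAGS after every instruction, the translator records the
 * last flag-setting operation (CC_OP) together with its operands
 * (CC_SRC/CC_DST), and the flags are only materialized on demand,
 * conceptually something like (a sketch, not the real helper API):
 *
 *   eflags = compute_flags(env->cc_op, env->cc_src, env->cc_dst);
 */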
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
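
/*
 * A20 gate emulation: when A20 is disabled, physical address bit 20 is
 * forced to 0 for 8086 compatibility, so env->a20_mask is ANDed into
 * every physical address computed by the page walks below.
 */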
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
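
/*
 * Note on cpu_x86_update_cr0() below: its final statement copies
 * CR0.MP/EM/TS (bits 1-3) into hflags with a single shift. That works
 * only if HF_MP/HF_EM/HF_TS are defined contiguously and in that order
 * in cpu.h (the header carries a "the order must be MP, EM, TS" note
 * to that effect).
 */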
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(CPU(cpu), 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}

#else
/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
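/*
 * Depending on CR0.PG, CR4.PAE and EFER.LMA, this walks either the
 * legacy two-level 32-bit tables, the three-level PAE tables, or the
 * four-level long-mode tables, setting accessed/dirty bits as it goes.
 * mmu_idx selects the permission check: MMU_USER_IDX for user accesses,
 * MMU_KSMAP_IDX for kernel accesses with SMAP enforced, MMU_KNOSMAP_IDX
 * otherwise. is_write1 == 2 denotes an instruction fetch.
 */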
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            env->a20_mask;
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
    if ((ptep & PG_NX_MASK) && is_write1 == 2) {
        goto do_fault_protect;
    }
    switch (mmu_idx) {
    case MMU_USER_IDX:
        if (!(ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    case MMU_KSMAP_IDX:
        if (is_write1 != 2 && (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        /* fall through */
    case MMU_KNOSMAP_IDX:
        if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            goto do_fault_protect;
        }
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            goto do_fault_protect;
        }
        break;

    default: /* cannot happen */
        break;
    }
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    page_offset = vaddr & (page_size - 1);
    paddr = pte + page_offset;

    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
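
/*
 * Debug variant of the walk above, used by e.g. the gdbstub and the
 * monitor: it translates without side effects (no accessed/dirty
 * updates, no faults injected, no permission checks) and returns -1
 * if the address is unmapped.
 */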
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }
            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde | ((pde & 0x1fe000) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
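
/*
 * DR7 encodes, per breakpoint 0-3, an enable bit pair plus type and
 * length fields. The type values are: 00 = instruction, 01 = data write,
 * 10 = I/O read/write (unsupported here), 11 = data read/write;
 * hw_breakpoint_type()/hw_breakpoint_len() extract these fields.
 * Instruction breakpoints map to QEMU breakpoints, data breakpoints
 * to watchpoints.
 */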
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
void breakpoint_handler(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}
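
/*
 * Machine-check architecture note: each MCA bank is a group of four
 * MSRs (CTL, STATUS, ADDR, MISC), which is why the injection code below
 * indexes env->mce_banks at 4 * bank. A new fatal error while
 * MCG_STATUS_MCIP is still set (a previous machine check in progress)
 * escalates to a triple fault, matching real hardware behaviour.
 */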
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif
/* Frob eflags into and out of the CPU temporary format. */
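/*
 * Inside TCG execution the arithmetic flags live in CC_SRC under
 * CC_OP_EFLAGS, and DF is kept as env->df = +1/-1 (derived from EFLAGS
 * bit 10) so string instructions can simply add it to their index
 * registers; env->eflags holds only the remaining bits until exec exit.
 */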
void x86_cpu_exec_enter(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->df = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
}
void x86_cpu_exec_exit(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    env->eflags = cpu_compute_eflags(env);
}
#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldub(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_lduw(cs->as, addr,
                              cpu_get_mem_attrs(env),
                              NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldl(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return address_space_ldq(cs->as, addr,
                             cpu_get_mem_attrs(env),
                             NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stb(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl_notdirty(cs->as, addr, val,
                               cpu_get_mem_attrs(env),
                               NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stw(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stl(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    address_space_stq(cs->as, addr, val,
                      cpu_get_mem_attrs(env),
                      NULL);
}
#endif