kvm: bios: provide _MAT to acpi processor
[qemu-kvm/fedora.git] / target-i386 / helper2.c
blob3ada676439f043d57913ac79226234c597147183
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
32 #include "qemu-kvm.h"
34 //#define DEBUG_MMU
36 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
38 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
39 uint32_t *ext_features,
40 uint32_t *ext2_features,
41 uint32_t *ext3_features)
43 int i;
44 /* feature flags taken from "Intel Processor Identification and the CPUID
45 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
46 * about feature names, the Linux name is used. */
47 const char *feature_name[] = {
48 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
49 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
50 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
51 "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
53 const char *ext_feature_name[] = {
54 "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
55 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
56 NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
57 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
59 const char *ext2_feature_name[] = {
60 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
61 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mttr", "pge", "mca", "cmov",
62 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
63 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
65 const char *ext3_feature_name[] = {
66 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
67 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
68 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
69 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
72 for ( i = 0 ; i < 32 ; i++ )
73 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
74 *features |= 1 << i;
75 return;
77 for ( i = 0 ; i < 32 ; i++ )
78 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
79 *ext_features |= 1 << i;
80 return;
82 for ( i = 0 ; i < 32 ; i++ )
83 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
84 *ext2_features |= 1 << i;
85 return;
87 for ( i = 0 ; i < 32 ; i++ )
88 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
89 *ext3_features |= 1 << i;
90 return;
92 fprintf(stderr, "CPU feature %s not found\n", flagname);
95 extern const char *cpu_vendor_string;
/* Allocate and initialise a CPUX86State for the CPU model named by
 * `cpu_model'.  Returns NULL if allocation fails or the model string is
 * rejected by cpu_x86_register().  The returned env has been reset via
 * cpu_reset(). */
97 CPUX86State *cpu_x86_init(const char *cpu_model)
99 CPUX86State *env;
/* one-shot guard: process-wide translator tables are built only once */
100 static int inited;
102 env = qemu_mallocz(sizeof(CPUX86State));
103 if (!env)
104 return NULL;
105 cpu_exec_init(env);
106 env->cpu_model_str = cpu_model;
108 /* init various static tables */
109 if (!inited) {
110 inited = 1;
111 optimize_flags_init();
/* bad model string: undo the allocation and registration */
113 if (cpu_x86_register(env, cpu_model) < 0) {
114 cpu_x86_close(env);
115 return NULL;
117 cpu_reset(env);
118 #ifdef USE_KQEMU
119 kqemu_init(env);
120 #endif
121 return env;
/* Static description of one built-in CPU model: CPUID vendor string words,
 * family/model/stepping, the four feature-flag words and the maximum
 * basic (level) and extended (xlevel) CPUID leaves. */
124 typedef struct x86_def_t {
125 const char *name;
126 uint32_t level;
/* raw 32-bit words of the 12-byte CPUID vendor string; 0 means "use the
 * default GenuineIntel vendor" (see cpu_x86_register) */
127 uint32_t vendor1, vendor2, vendor3;
128 int family;
129 int model;
130 int stepping;
131 uint32_t features, ext_features, ext2_features, ext3_features;
132 uint32_t xlevel;
133 } x86_def_t;
/* Baseline Pentium Pro-class feature set shared by the generic qemu models. */
135 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
136 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
137 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
138 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of built-in CPU models, selected by name with -cpu and looked up
 * in cpu_x86_find_by_name(). */
139 static x86_def_t x86_defs[] = {
140 #ifdef TARGET_X86_64
142 .name = "qemu64",
143 .level = 2,
144 .vendor1 = 0x68747541, /* "Auth" */
145 .vendor2 = 0x69746e65, /* "enti" */
146 .vendor3 = 0x444d4163, /* "cAMD" */
147 .family = 6,
148 .model = 2,
149 .stepping = 3,
150 .features = PPRO_FEATURES |
151 /* these features are needed for Win64 and aren't fully implemented */
152 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
153 /* this feature is needed for Solaris and isn't fully implemented */
154 CPUID_PSE36,
155 .ext_features = CPUID_EXT_SSE3,
/* leaf 8000_0001 EDX mirrors the leaf-1 bits that exist there, plus the
 * 64-bit-relevant extras */
156 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
157 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
158 .ext3_features = CPUID_EXT3_SVM,
159 .xlevel = 0x8000000A,
161 #endif
163 .name = "qemu32",
164 .level = 2,
165 .family = 6,
166 .model = 3,
167 .stepping = 3,
168 .features = PPRO_FEATURES,
169 .ext_features = CPUID_EXT_SSE3,
170 .xlevel = 0,
/* historical models below use raw feature masks rather than named bits */
173 .name = "486",
174 .level = 0,
175 .family = 4,
176 .model = 0,
177 .stepping = 0,
178 .features = 0x0000000B,
179 .xlevel = 0,
182 .name = "pentium",
183 .level = 1,
184 .family = 5,
185 .model = 4,
186 .stepping = 3,
187 .features = 0x008001BF,
188 .xlevel = 0,
191 .name = "pentium2",
192 .level = 2,
193 .family = 6,
194 .model = 5,
195 .stepping = 2,
196 .features = 0x0183F9FF,
197 .xlevel = 0,
200 .name = "pentium3",
201 .level = 2,
202 .family = 6,
203 .model = 7,
204 .stepping = 3,
205 .features = 0x0383F9FF,
206 .xlevel = 0,
210 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
212 unsigned int i;
213 x86_def_t *def;
215 char *s = strdup(cpu_model);
216 char *featurestr, *name = strtok(s, ",");
217 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
218 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
219 int family = -1, model = -1, stepping = -1;
221 def = NULL;
222 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
223 if (strcmp(name, x86_defs[i].name) == 0) {
224 def = &x86_defs[i];
225 break;
228 if (!def)
229 goto error;
230 memcpy(x86_cpu_def, def, sizeof(*def));
232 featurestr = strtok(NULL, ",");
234 while (featurestr) {
235 char *val;
236 if (featurestr[0] == '+') {
237 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
238 } else if (featurestr[0] == '-') {
239 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
240 } else if ((val = strchr(featurestr, '='))) {
241 *val = 0; val++;
242 if (!strcmp(featurestr, "family")) {
243 char *err;
244 family = strtol(val, &err, 10);
245 if (!*val || *err || family < 0) {
246 fprintf(stderr, "bad numerical value %s\n", val);
247 x86_cpu_def = 0;
248 goto error;
250 x86_cpu_def->family = family;
251 } else if (!strcmp(featurestr, "model")) {
252 char *err;
253 model = strtol(val, &err, 10);
254 if (!*val || *err || model < 0 || model > 0xf) {
255 fprintf(stderr, "bad numerical value %s\n", val);
256 x86_cpu_def = 0;
257 goto error;
259 x86_cpu_def->model = model;
260 } else if (!strcmp(featurestr, "stepping")) {
261 char *err;
262 stepping = strtol(val, &err, 10);
263 if (!*val || *err || stepping < 0 || stepping > 0xf) {
264 fprintf(stderr, "bad numerical value %s\n", val);
265 x86_cpu_def = 0;
266 goto error;
268 x86_cpu_def->stepping = stepping;
269 } else {
270 fprintf(stderr, "unrecognized feature %s\n", featurestr);
271 x86_cpu_def = 0;
272 goto error;
274 } else {
275 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
276 x86_cpu_def = 0;
277 goto error;
279 featurestr = strtok(NULL, ",");
281 x86_cpu_def->features |= plus_features;
282 x86_cpu_def->ext_features |= plus_ext_features;
283 x86_cpu_def->ext2_features |= plus_ext2_features;
284 x86_cpu_def->ext3_features |= plus_ext3_features;
285 x86_cpu_def->features &= ~minus_features;
286 x86_cpu_def->ext_features &= ~minus_ext_features;
287 x86_cpu_def->ext2_features &= ~minus_ext2_features;
288 x86_cpu_def->ext3_features &= ~minus_ext3_features;
289 free(s);
290 return 0;
292 error:
293 free(s);
294 return -1;
297 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
299 unsigned int i;
301 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
302 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
/* Resolve `cpu_model' and load the resulting CPUID identification and
 * feature words into `env'.  Returns 0 on success, -1 if the model string
 * is invalid. */
305 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
307 x86_def_t def1, *def = &def1;
309 if (cpu_x86_find_by_name(def, cpu_model) < 0)
310 return -1;
/* vendor1 == 0 in the model table means "use the default Intel vendor id" */
311 if (def->vendor1) {
312 env->cpuid_vendor1 = def->vendor1;
313 env->cpuid_vendor2 = def->vendor2;
314 env->cpuid_vendor3 = def->vendor3;
315 } else {
316 env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
317 env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
318 env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
320 env->cpuid_level = def->level;
/* pack family/model/stepping into the CPUID.1:EAX version format */
321 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
322 env->cpuid_features = def->features;
/* power-on default of the IA32_PAT MSR */
323 env->pat = 0x0007040600070406ULL;
324 env->cpuid_ext_features = def->ext_features;
325 env->cpuid_ext2_features = def->ext2_features;
326 env->cpuid_xlevel = def->xlevel;
327 env->cpuid_ext3_features = def->ext3_features;
/* build the 48-byte processor brand string returned by leaves
 * 8000_0002..8000_0004, packed 4 chars per 32-bit word, NUL padded */
329 const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
330 int c, len, i;
331 if (cpu_vendor_string != NULL)
332 model_id = cpu_vendor_string;
333 len = strlen(model_id);
334 for(i = 0; i < 48; i++) {
335 if (i >= len)
336 c = '\0';
337 else
338 c = model_id[i];
339 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
342 return 0;
345 /* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/RESET state: real mode,
 * CS:IP pointing at the reset vector, flags and FPU at defaults.  Fields
 * after `breakpoints' in CPUX86State are deliberately preserved. */
346 void cpu_reset(CPUX86State *env)
348 int i;
350 memset(env, 0, offsetof(CPUX86State, breakpoints))
352 tlb_flush(env, 1);
354 env->old_exception = -1;
356 /* init to reset state */
358 #ifdef CONFIG_SOFTMMU
359 env->hflags |= HF_SOFTMMU_MASK;
360 #endif
361 env->hflags |= HF_GIF_MASK;
/* CR0 reset value: ET set, CD/NW set, paging and protection off */
363 cpu_x86_update_cr0(env, 0x60000010);
364 env->a20_mask = 0xffffffff;
365 env->smbase = 0x30000;
367 env->idt.limit = 0xffff;
368 env->gdt.limit = 0xffff;
369 env->ldt.limit = 0xffff;
370 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
371 env->tr.limit = 0xffff;
372 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
/* CS base 0xffff0000 + EIP 0xfff0 = reset vector 0xfffffff0 */
374 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
375 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
376 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
377 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
378 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
379 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
381 env->eip = 0xfff0;
/* EDX holds the CPU signature after reset */
382 env->regs[R_EDX] = env->cpuid_version;
/* bit 1 of EFLAGS is architecturally always 1 */
384 env->eflags = 0x2;
386 /* FPU init */
/* tag 1 = register empty; control word 0x37f = all exceptions masked */
387 for(i = 0;i < 8; i++)
388 env->fptags[i] = 1;
389 env->fpuc = 0x37f;
391 env->mxcsr = 0x1f80;
/* Release a CPUX86State created by cpu_x86_init().
 * NOTE(review): env is allocated with qemu_mallocz() but released with
 * plain free() -- this is only correct while qemu_mallocz is malloc-based;
 * confirm, or switch to the matching qemu free routine. */
394 void cpu_x86_close(CPUX86State *env)
396 free(env);
399 /***********************************************************/
400 /* x86 debug */
/* Printable names for the lazy condition-code operations; indexed directly
 * by env->cc_op in cpu_dump_state(), so the order here must stay in sync
 * with the CC_OP_* enumeration -- TODO confirm against cpu.h. */
402 static const char *cc_op_str[] = {
403 "DYNAMIC",
404 "EFLAGS",
406 "MULB",
407 "MULW",
408 "MULL",
409 "MULQ",
411 "ADDB",
412 "ADDW",
413 "ADDL",
414 "ADDQ",
416 "ADCB",
417 "ADCW",
418 "ADCL",
419 "ADCQ",
421 "SUBB",
422 "SUBW",
423 "SUBL",
424 "SUBQ",
426 "SBBB",
427 "SBBW",
428 "SBBL",
429 "SBBQ",
431 "LOGICB",
432 "LOGICW",
433 "LOGICL",
434 "LOGICQ",
436 "INCB",
437 "INCW",
438 "INCL",
439 "INCQ",
441 "DECB",
442 "DECW",
443 "DECL",
444 "DECQ",
446 "SHLB",
447 "SHLW",
448 "SHLL",
449 "SHLQ",
451 "SARB",
452 "SARW",
453 "SARL",
454 "SARQ",
/* Dump the full visible CPU state (general registers, segments, control
 * registers, and optionally condition codes and FPU/SSE state per `flags')
 * to `f' via the fprintf-like callback.  Chooses 64-bit or 32-bit layout
 * from the current hflags. */
457 void cpu_dump_state(CPUState *env, FILE *f,
458 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
459 int flags)
461 int eflags, i, nb;
462 char cc_op_name[32];
463 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
465 eflags = env->eflags;
/* 64-bit register dump when the current code segment is 64-bit */
466 #ifdef TARGET_X86_64
467 if (env->hflags & HF_CS64_MASK) {
468 cpu_fprintf(f,
469 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
470 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
471 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
472 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
473 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
474 env->regs[R_EAX],
475 env->regs[R_EBX],
476 env->regs[R_ECX],
477 env->regs[R_EDX],
478 env->regs[R_ESI],
479 env->regs[R_EDI],
480 env->regs[R_EBP],
481 env->regs[R_ESP],
482 env->regs[8],
483 env->regs[9],
484 env->regs[10],
485 env->regs[11],
486 env->regs[12],
487 env->regs[13],
488 env->regs[14],
489 env->regs[15],
490 env->eip, eflags,
491 eflags & DF_MASK ? 'D' : '-',
492 eflags & CC_O ? 'O' : '-',
493 eflags & CC_S ? 'S' : '-',
494 eflags & CC_Z ? 'Z' : '-',
495 eflags & CC_A ? 'A' : '-',
496 eflags & CC_P ? 'P' : '-',
497 eflags & CC_C ? 'C' : '-',
498 env->hflags & HF_CPL_MASK,
499 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
500 (env->a20_mask >> 20) & 1,
501 (env->hflags >> HF_SMM_SHIFT) & 1,
502 (env->hflags >> HF_HALTED_SHIFT) & 1);
503 } else
504 #endif
/* 32-bit register dump */
506 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
507 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
508 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
509 (uint32_t)env->regs[R_EAX],
510 (uint32_t)env->regs[R_EBX],
511 (uint32_t)env->regs[R_ECX],
512 (uint32_t)env->regs[R_EDX],
513 (uint32_t)env->regs[R_ESI],
514 (uint32_t)env->regs[R_EDI],
515 (uint32_t)env->regs[R_EBP],
516 (uint32_t)env->regs[R_ESP],
517 (uint32_t)env->eip, eflags,
518 eflags & DF_MASK ? 'D' : '-',
519 eflags & CC_O ? 'O' : '-',
520 eflags & CC_S ? 'S' : '-',
521 eflags & CC_Z ? 'Z' : '-',
522 eflags & CC_A ? 'A' : '-',
523 eflags & CC_P ? 'P' : '-',
524 eflags & CC_C ? 'C' : '-',
525 env->hflags & HF_CPL_MASK,
526 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
527 (env->a20_mask >> 20) & 1,
528 (env->hflags >> HF_SMM_SHIFT) & 1,
529 (env->hflags >> HF_HALTED_SHIFT) & 1);
/* segment/descriptor-table dump: 64-bit bases in long mode, else 32-bit */
532 #ifdef TARGET_X86_64
533 if (env->hflags & HF_LMA_MASK) {
534 for(i = 0; i < 6; i++) {
535 SegmentCache *sc = &env->segs[i];
536 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
537 seg_name[i],
538 sc->selector,
539 sc->base,
540 sc->limit,
541 sc->flags);
543 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
544 env->ldt.selector,
545 env->ldt.base,
546 env->ldt.limit,
547 env->ldt.flags);
548 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
549 env->tr.selector,
550 env->tr.base,
551 env->tr.limit,
552 env->tr.flags);
553 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
554 env->gdt.base, env->gdt.limit);
555 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
556 env->idt.base, env->idt.limit);
557 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
558 (uint32_t)env->cr[0],
559 env->cr[2],
560 env->cr[3],
561 (uint32_t)env->cr[4]);
562 } else
563 #endif
565 for(i = 0; i < 6; i++) {
566 SegmentCache *sc = &env->segs[i];
567 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
568 seg_name[i],
569 sc->selector,
570 (uint32_t)sc->base,
571 sc->limit,
572 sc->flags);
574 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
575 env->ldt.selector,
576 (uint32_t)env->ldt.base,
577 env->ldt.limit,
578 env->ldt.flags);
579 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
580 env->tr.selector,
581 (uint32_t)env->tr.base,
582 env->tr.limit,
583 env->tr.flags);
584 cpu_fprintf(f, "GDT= %08x %08x\n",
585 (uint32_t)env->gdt.base, env->gdt.limit);
586 cpu_fprintf(f, "IDT= %08x %08x\n",
587 (uint32_t)env->idt.base, env->idt.limit);
588 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
589 (uint32_t)env->cr[0],
590 (uint32_t)env->cr[2],
591 (uint32_t)env->cr[3],
592 (uint32_t)env->cr[4]);
/* optional lazy condition-code state */
594 if (flags & X86_DUMP_CCOP) {
595 if ((unsigned)env->cc_op < CC_OP_NB)
596 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
597 else
598 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
599 #ifdef TARGET_X86_64
600 if (env->hflags & HF_CS64_MASK) {
601 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
602 env->cc_src, env->cc_dst,
603 cc_op_name);
604 } else
605 #endif
607 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
608 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
609 cc_op_name);
/* optional FPU/SSE state */
612 if (flags & X86_DUMP_FPU) {
613 int fptag;
614 fptag = 0;
/* fptags stores 1 = empty; FTW wants 1 = valid, hence the inversion */
615 for(i = 0; i < 8; i++) {
616 fptag |= ((!env->fptags[i]) << i);
618 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
619 env->fpuc,
620 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
621 env->fpstt,
622 fptag,
623 env->mxcsr);
624 for(i=0;i<8;i++) {
625 #if defined(USE_X86LDOUBLE)
626 union {
627 long double d;
628 struct {
629 uint64_t lower;
630 uint16_t upper;
631 } l;
632 } tmp;
633 tmp.d = env->fpregs[i].d;
634 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
635 i, tmp.l.lower, tmp.l.upper);
636 #else
637 cpu_fprintf(f, "FPR%d=%016" PRIx64,
638 i, env->fpregs[i].mmx.q);
639 #endif
640 if ((i & 1) == 1)
641 cpu_fprintf(f, "\n");
642 else
643 cpu_fprintf(f, " ");
/* 16 XMM registers in 64-bit mode, 8 otherwise */
645 if (env->hflags & HF_CS64_MASK)
646 nb = 16;
647 else
648 nb = 8;
649 for(i=0;i<nb;i++) {
650 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
652 env->xmm_regs[i].XMM_L(3),
653 env->xmm_regs[i].XMM_L(2),
654 env->xmm_regs[i].XMM_L(1),
655 env->xmm_regs[i].XMM_L(0));
656 if ((i & 1) == 1)
657 cpu_fprintf(f, "\n");
658 else
659 cpu_fprintf(f, " ");
664 /***********************************************************/
665 /* x86 mmu */
666 /* XXX: add PGE support */
668 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
670 a20_state = (a20_state != 0);
671 if (a20_state != ((env->a20_mask >> 20) & 1)) {
672 #if defined(DEBUG_MMU)
673 printf("A20 update: a20=%d\n", a20_state);
674 #endif
675 /* if the cpu is currently executing code, we must unlink it and
676 all the potentially executing TB */
677 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
679 /* when a20 is changed, all the MMU mappings are invalid, so
680 we must flush everything */
681 tlb_flush(env, 1);
682 env->a20_mask = 0xffefffff | (a20_state << 20);
/* Install a new CR0 value: flush the TLB when paging-relevant bits change,
 * handle long-mode entry/exit on 64-bit targets, and recompute the cached
 * PE/ADDSEG/MP/EM/TS hidden flags. */
686 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
688 int pe_state;
690 #if defined(DEBUG_MMU)
691 printf("CR0 update: CR0=0x%08x\n", new_cr0);
692 #endif
693 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
694 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
695 tlb_flush(env, 1);
698 #ifdef TARGET_X86_64
/* PG 0->1 with EFER.LME set activates long mode (requires CR4.PAE) */
699 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
700 (env->efer & MSR_EFER_LME)) {
701 /* enter in long mode */
702 /* XXX: generate an exception */
703 if (!(env->cr[4] & CR4_PAE_MASK))
704 return;
705 env->efer |= MSR_EFER_LMA;
706 env->hflags |= HF_LMA_MASK;
707 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
708 (env->efer & MSR_EFER_LMA)) {
709 /* exit long mode */
710 env->efer &= ~MSR_EFER_LMA;
711 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
712 env->eip &= 0xffffffff;
714 #endif
/* CR0.ET is hardwired to 1 on this CPU family */
715 env->cr[0] = new_cr0 | CR0_ET_MASK;
717 /* update PE flag in hidden flags */
718 pe_state = (env->cr[0] & CR0_PE_MASK);
719 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
720 /* ensure that ADDSEG is always set in real mode */
721 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
722 /* update FPU flags */
/* shifts CR0.MP/EM/TS as a group into the corresponding hflags bits */
723 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
724 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
727 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
728 the PDPT */
729 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
731 env->cr[3] = new_cr3;
732 if (env->cr[0] & CR0_PG_MASK) {
733 #if defined(DEBUG_MMU)
734 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
735 #endif
736 tlb_flush(env, 0);
740 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
742 #if defined(DEBUG_MMU)
743 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
744 #endif
745 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
746 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
747 tlb_flush(env, 1);
749 /* SSE handling */
750 if (!(env->cpuid_features & CPUID_SSE))
751 new_cr4 &= ~CR4_OSFXSR_MASK;
752 if (new_cr4 & CR4_OSFXSR_MASK)
753 env->hflags |= HF_OSFXSR_MASK;
754 else
755 env->hflags &= ~HF_OSFXSR_MASK;
757 env->cr[4] = new_cr4;
760 /* XXX: also flush 4MB pages */
/* Invalidate the TLB entry covering `addr' (INVLPG semantics). */
761 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
763 tlb_flush_page(env, addr);
766 #if defined(CONFIG_USER_ONLY)
/* User-mode-only variant: there is no guest MMU, so every fault becomes a
 * guest page fault (#PF, vector 0x0E) with a user-mode error code.
 * Always returns 1 = "raise the fault". */
768 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
769 int is_write, int mmu_idx, int is_softmmu)
771 /* user mode only emulation */
/* is_write may be 2 for instruction fetches; keep only the write bit */
772 is_write &= 1;
773 env->cr[2] = addr;
774 env->error_code = (is_write << PG_ERROR_W_BIT);
775 env->error_code |= PG_ERROR_U_MASK;
776 env->exception_index = EXCP0E_PAGE;
777 return 1;
/* User-mode-only variant: guest addresses are identity-mapped, so the
 * "physical" address is the virtual address itself. */
780 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
782 return addr;
785 #else
/* mask selecting the 4 KB-aligned physical frame from a PTE/PDE
 * (NOTE: only 32-bit physical addresses are supported, see XXX below) */
787 #define PHYS_ADDR_MASK 0xfffff000
789 /* return value:
790 -1 = cannot handle fault
791 0 = nothing more to do
792 1 = generate PF fault
793 2 = soft MMU activation required for this block
/* Walk the guest page tables for `addr' and install a TLB entry, or set up
 * page-fault state.  is_write1 encodes the access: 0 = read, 1 = write,
 * 2 = instruction fetch (used for NX checks).  Handles three layouts:
 * no paging, PAE/long mode (64-bit entries), and legacy 2-level 32-bit. */
795 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
796 int is_write1, int mmu_idx, int is_softmmu)
798 uint64_t ptep, pte;
799 uint32_t pdpe_addr, pde_addr, pte_addr;
800 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
801 unsigned long paddr, page_offset;
802 target_ulong vaddr, virt_addr;
804 is_user = mmu_idx == MMU_USER_IDX;
805 #if defined(DEBUG_MMU)
806 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
807 addr, is_write1, is_user, env->eip);
808 #endif
809 is_write = is_write1 & 1;
/* paging disabled: identity map with full permissions */
811 if (!(env->cr[0] & CR0_PG_MASK)) {
812 pte = addr;
813 virt_addr = addr & TARGET_PAGE_MASK;
814 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
815 page_size = 4096;
816 goto do_mapping;
/* PAE (and long mode): 64-bit entries, NX bit supported.  ptep
 * accumulates the combined user/rw/nx permissions across levels */
819 if (env->cr[4] & CR4_PAE_MASK) {
820 uint64_t pde, pdpe;
822 /* XXX: we only use 32 bit physical addresses */
823 #ifdef TARGET_X86_64
824 if (env->hflags & HF_LMA_MASK) {
825 uint32_t pml4e_addr;
826 uint64_t pml4e;
827 int32_t sext;
829 /* test virtual address sign extension */
/* non-canonical addresses raise #GP, not #PF */
830 sext = (int64_t)addr >> 47;
831 if (sext != 0 && sext != -1) {
832 env->error_code = 0;
833 env->exception_index = EXCP0D_GPF;
834 return 1;
837 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
838 env->a20_mask;
839 pml4e = ldq_phys(pml4e_addr);
840 if (!(pml4e & PG_PRESENT_MASK)) {
841 error_code = 0;
842 goto do_fault;
/* NX set while EFER.NXE clear is a reserved-bit violation */
844 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
845 error_code = PG_ERROR_RSVD_MASK;
846 goto do_fault;
848 if (!(pml4e & PG_ACCESSED_MASK)) {
849 pml4e |= PG_ACCESSED_MASK;
850 stl_phys_notdirty(pml4e_addr, pml4e);
/* XOR inverts NX so that "permission granted" bits can be ANDed */
852 ptep = pml4e ^ PG_NX_MASK;
853 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
854 env->a20_mask;
855 pdpe = ldq_phys(pdpe_addr);
856 if (!(pdpe & PG_PRESENT_MASK)) {
857 error_code = 0;
858 goto do_fault;
860 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
861 error_code = PG_ERROR_RSVD_MASK;
862 goto do_fault;
864 ptep &= pdpe ^ PG_NX_MASK;
865 if (!(pdpe & PG_ACCESSED_MASK)) {
866 pdpe |= PG_ACCESSED_MASK;
867 stl_phys_notdirty(pdpe_addr, pdpe);
869 } else
870 #endif
/* legacy PAE: 4-entry PDPT, entries carry no permission bits */
872 /* XXX: load them when cr3 is loaded ? */
873 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
874 env->a20_mask;
875 pdpe = ldq_phys(pdpe_addr);
876 if (!(pdpe & PG_PRESENT_MASK)) {
877 error_code = 0;
878 goto do_fault;
880 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
883 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
884 env->a20_mask;
885 pde = ldq_phys(pde_addr);
886 if (!(pde & PG_PRESENT_MASK)) {
887 error_code = 0;
888 goto do_fault;
890 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
891 error_code = PG_ERROR_RSVD_MASK;
892 goto do_fault;
894 ptep &= pde ^ PG_NX_MASK;
895 if (pde & PG_PSE_MASK) {
896 /* 2 MB page */
897 page_size = 2048 * 1024;
898 ptep ^= PG_NX_MASK;
899 if ((ptep & PG_NX_MASK) && is_write1 == 2)
900 goto do_fault_protect;
901 if (is_user) {
902 if (!(ptep & PG_USER_MASK))
903 goto do_fault_protect;
904 if (is_write && !(ptep & PG_RW_MASK))
905 goto do_fault_protect;
906 } else {
/* supervisor writes honour read-only pages only when CR0.WP is set */
907 if ((env->cr[0] & CR0_WP_MASK) &&
908 is_write && !(ptep & PG_RW_MASK))
909 goto do_fault_protect;
911 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
912 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
913 pde |= PG_ACCESSED_MASK;
914 if (is_dirty)
915 pde |= PG_DIRTY_MASK;
916 stl_phys_notdirty(pde_addr, pde);
918 /* align to page_size */
919 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
920 virt_addr = addr & ~(page_size - 1);
921 } else {
922 /* 4 KB page */
923 if (!(pde & PG_ACCESSED_MASK)) {
924 pde |= PG_ACCESSED_MASK;
925 stl_phys_notdirty(pde_addr, pde);
927 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
928 env->a20_mask;
929 pte = ldq_phys(pte_addr);
930 if (!(pte & PG_PRESENT_MASK)) {
931 error_code = 0;
932 goto do_fault;
934 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
935 error_code = PG_ERROR_RSVD_MASK;
936 goto do_fault;
938 /* combine pde and pte nx, user and rw protections */
939 ptep &= pte ^ PG_NX_MASK;
940 ptep ^= PG_NX_MASK;
941 if ((ptep & PG_NX_MASK) && is_write1 == 2)
942 goto do_fault_protect;
943 if (is_user) {
944 if (!(ptep & PG_USER_MASK))
945 goto do_fault_protect;
946 if (is_write && !(ptep & PG_RW_MASK))
947 goto do_fault_protect;
948 } else {
949 if ((env->cr[0] & CR0_WP_MASK) &&
950 is_write && !(ptep & PG_RW_MASK))
951 goto do_fault_protect;
953 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
954 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
955 pte |= PG_ACCESSED_MASK;
956 if (is_dirty)
957 pte |= PG_DIRTY_MASK;
958 stl_phys_notdirty(pte_addr, pte);
960 page_size = 4096;
961 virt_addr = addr & ~0xfff;
962 pte = pte & (PHYS_ADDR_MASK | 0xfff);
/* legacy 2-level 32-bit paging: 32-bit entries, no NX */
964 } else {
965 uint32_t pde;
967 /* page directory entry */
968 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
969 env->a20_mask;
970 pde = ldl_phys(pde_addr);
971 if (!(pde & PG_PRESENT_MASK)) {
972 error_code = 0;
973 goto do_fault;
975 /* if PSE bit is set, then we use a 4MB page */
976 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
977 page_size = 4096 * 1024;
978 if (is_user) {
979 if (!(pde & PG_USER_MASK))
980 goto do_fault_protect;
981 if (is_write && !(pde & PG_RW_MASK))
982 goto do_fault_protect;
983 } else {
984 if ((env->cr[0] & CR0_WP_MASK) &&
985 is_write && !(pde & PG_RW_MASK))
986 goto do_fault_protect;
988 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
989 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
990 pde |= PG_ACCESSED_MASK;
991 if (is_dirty)
992 pde |= PG_DIRTY_MASK;
993 stl_phys_notdirty(pde_addr, pde);
996 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
997 ptep = pte;
998 virt_addr = addr & ~(page_size - 1);
999 } else {
1000 if (!(pde & PG_ACCESSED_MASK)) {
1001 pde |= PG_ACCESSED_MASK;
1002 stl_phys_notdirty(pde_addr, pde);
1005 /* page directory entry */
1006 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1007 env->a20_mask;
1008 pte = ldl_phys(pte_addr);
1009 if (!(pte & PG_PRESENT_MASK)) {
1010 error_code = 0;
1011 goto do_fault;
1013 /* combine pde and pte user and rw protections */
1014 ptep = pte & pde;
1015 if (is_user) {
1016 if (!(ptep & PG_USER_MASK))
1017 goto do_fault_protect;
1018 if (is_write && !(ptep & PG_RW_MASK))
1019 goto do_fault_protect;
1020 } else {
1021 if ((env->cr[0] & CR0_WP_MASK) &&
1022 is_write && !(ptep & PG_RW_MASK))
1023 goto do_fault_protect;
1025 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1026 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1027 pte |= PG_ACCESSED_MASK;
1028 if (is_dirty)
1029 pte |= PG_DIRTY_MASK;
1030 stl_phys_notdirty(pte_addr, pte);
1032 page_size = 4096;
1033 virt_addr = addr & ~0xfff;
1036 /* the page can be put in the TLB */
1037 prot = PAGE_READ;
1038 if (!(ptep & PG_NX_MASK))
1039 prot |= PAGE_EXEC;
1040 if (pte & PG_DIRTY_MASK) {
1041 /* only set write access if already dirty... otherwise wait
1042 for dirty access */
1043 if (is_user) {
1044 if (ptep & PG_RW_MASK)
1045 prot |= PAGE_WRITE;
1046 } else {
1047 if (!(env->cr[0] & CR0_WP_MASK) ||
1048 (ptep & PG_RW_MASK))
1049 prot |= PAGE_WRITE;
1052 do_mapping:
1053 pte = pte & env->a20_mask;
1055 /* Even if 4MB pages, we map only one 4KB page in the cache to
1056 avoid filling it too fast */
1057 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1058 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1059 vaddr = virt_addr + page_offset;
1061 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1062 return ret;
/* fault exit: build the #PF error code and record CR2 (or the SVM exit
 * info field when the page fault is intercepted by a VMM) */
1063 do_fault_protect:
1064 error_code = PG_ERROR_P_MASK;
1065 do_fault:
1066 error_code |= (is_write << PG_ERROR_W_BIT);
1067 if (is_user)
1068 error_code |= PG_ERROR_U_MASK;
1069 if (is_write1 == 2 &&
1070 (env->efer & MSR_EFER_NXE) &&
1071 (env->cr[4] & CR4_PAE_MASK))
1072 error_code |= PG_ERROR_I_D_MASK;
1073 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1074 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1075 } else {
1076 env->cr[2] = addr;
1078 env->error_code = error_code;
1079 env->exception_index = EXCP0E_PAGE;
1080 /* the VMM will handle this */
1081 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1082 return 2;
1083 return 1;
/* Translate a guest virtual address to a physical address for the debugger,
 * without touching the TLB, permission bits, or accessed/dirty flags.
 * Returns -1 when no mapping exists.
 * NOTE(review): PAE/long-mode entries are 64-bit but are read here with
 * ldl_phys (low 32 bits only) -- consistent with the "32 bit physical
 * addresses" limitation noted below, but worth confirming. */
1086 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1088 uint32_t pde_addr, pte_addr;
1089 uint32_t pde, pte, paddr, page_offset, page_size;
1091 if (env->cr[4] & CR4_PAE_MASK) {
1092 uint32_t pdpe_addr, pde_addr, pte_addr;
1093 uint32_t pdpe;
1095 /* XXX: we only use 32 bit physical addresses */
1096 #ifdef TARGET_X86_64
1097 if (env->hflags & HF_LMA_MASK) {
1098 uint32_t pml4e_addr, pml4e;
1099 int32_t sext;
1101 /* test virtual address sign extension */
/* non-canonical address: no mapping */
1102 sext = (int64_t)addr >> 47;
1103 if (sext != 0 && sext != -1)
1104 return -1;
1106 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1107 env->a20_mask;
1108 pml4e = ldl_phys(pml4e_addr);
1109 if (!(pml4e & PG_PRESENT_MASK))
1110 return -1;
1112 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1113 env->a20_mask;
1114 pdpe = ldl_phys(pdpe_addr);
1115 if (!(pdpe & PG_PRESENT_MASK))
1116 return -1;
1117 } else
1118 #endif
/* legacy PAE: 4-entry PDPT selected by address bits 31:30 */
1120 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1121 env->a20_mask;
1122 pdpe = ldl_phys(pdpe_addr);
1123 if (!(pdpe & PG_PRESENT_MASK))
1124 return -1;
1127 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1128 env->a20_mask;
1129 pde = ldl_phys(pde_addr);
1130 if (!(pde & PG_PRESENT_MASK)) {
1131 return -1;
1133 if (pde & PG_PSE_MASK) {
1134 /* 2 MB page */
1135 page_size = 2048 * 1024;
1136 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1137 } else {
1138 /* 4 KB page */
1139 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1140 env->a20_mask;
1141 page_size = 4096;
1142 pte = ldl_phys(pte_addr);
1144 } else {
/* non-PAE: either no paging (identity) or the 2-level 32-bit walk */
1145 if (!(env->cr[0] & CR0_PG_MASK)) {
1146 pte = addr;
1147 page_size = 4096;
1148 } else {
1149 /* page directory entry */
1150 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1151 pde = ldl_phys(pde_addr);
1152 if (!(pde & PG_PRESENT_MASK))
1153 return -1;
1154 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1155 pte = pde & ~0x003ff000; /* align to 4MB */
1156 page_size = 4096 * 1024;
1157 } else {
1158 /* page directory entry */
1159 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1160 pte = ldl_phys(pte_addr);
1161 if (!(pte & PG_PRESENT_MASK))
1162 return -1;
1163 page_size = 4096;
1166 pte = pte & env->a20_mask;
1169 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1170 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1171 return paddr;
1173 #endif /* !CONFIG_USER_ONLY */