More TCG updates for CRIS
[qemu/qemu-JZ.git] / target-i386 / helper2.c
blob6cf218fa0dd9a21c08ce6055cd0086709385beb3
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31 #include "qemu-common.h"
33 //#define DEBUG_MMU
35 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
/* Parse a single CPUID feature-flag name (as used in "-cpu model,+flag"
 * or ",-flag") and set the matching bit in one of the four CPUID
 * feature bitmaps.  Unknown names are reported on stderr and otherwise
 * ignored.
 *
 * flagname      - feature name to look up (e.g. "sse2", "nx")
 * features      - CPUID.1:EDX bitmap (standard features)
 * ext_features  - CPUID.1:ECX bitmap (extended features)
 * ext2_features - CPUID.80000001:EDX bitmap (AMD extended features)
 * ext3_features - CPUID.80000001:ECX bitmap (AMD extended features)
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx",
        NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
        /* bug fix: this entry was previously misspelled "mttr" */
        "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */,
        NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
        "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
        "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
        "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt",
        NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* Search the four 32-entry tables in CPUID leaf order; first match
     * wins.  Use an unsigned constant so that setting bit 31 does not
     * left-shift a signed 1 by 31 (undefined behavior in C). */
    for (i = 0; i < 32; i++) {
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1u << i;
            return;
        }
    }
    for (i = 0; i < 32; i++) {
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1u << i;
            return;
        }
    }
    for (i = 0; i < 32; i++) {
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1u << i;
            return;
        }
    }
    for (i = 0; i < 32; i++) {
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1u << i;
            return;
        }
    }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
/* Allocate, register and reset a new x86 CPU state for the given
 * "-cpu" model string (e.g. "qemu64,+sse3").  Returns NULL if the
 * allocation fails or the model name is unknown.  The one-time
 * translator flag-table setup runs only on the first call. */
94 CPUX86State *cpu_x86_init(const char *cpu_model)
96     CPUX86State *env;
97     static int inited;
99     env = qemu_mallocz(sizeof(CPUX86State));
100     if (!env)
101         return NULL;
102     cpu_exec_init(env);
103     env->cpu_model_str = cpu_model;
105     /* init various static tables */
106     if (!inited) {
107         inited = 1;
108         optimize_flags_init();
110     if (cpu_x86_register(env, cpu_model) < 0) {
111         cpu_x86_close(env);
112         return NULL;
114     cpu_reset(env);
115 #ifdef USE_KQEMU
116     kqemu_init(env);
117 #endif
118     return env;
/* Static description of one supported "-cpu" model: CPUID vendor
 * string words, family/model/stepping, the four CPUID feature
 * bitmaps, and the maximum standard (level) and extended (xlevel)
 * CPUID leaves. */
121 typedef struct x86_def_t {
122     const char *name;
123     uint32_t level;
124     uint32_t vendor1, vendor2, vendor3;
125     int family;
126     int model;
127     int stepping;
128     uint32_t features, ext_features, ext2_features, ext3_features;
129     uint32_t xlevel;
130 } x86_def_t;
/* Baseline CPUID.1:EDX feature sets for the historical CPU models in
 * x86_defs[]; each set extends the previous generation's. */
132 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
133 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
134           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
135 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
136           CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
137           CPUID_PSE36 | CPUID_FXSR)
138 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
139 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
140           CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
141           CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
142           CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of built-in CPU models selectable with "-cpu".  Entries with
 * no vendor words get the GenuineIntel default in cpu_x86_register().
 * "qemu64" is only available on x86-64 targets. */
143 static x86_def_t x86_defs[] = {
144 #ifdef TARGET_X86_64
146         .name = "qemu64",
147         .level = 2,
148         .vendor1 = 0x68747541, /* "Auth" */
149         .vendor2 = 0x69746e65, /* "enti" */
150         .vendor3 = 0x444d4163, /* "cAMD" */
151         .family = 6,
152         .model = 2,
153         .stepping = 3,
154         .features = PPRO_FEATURES |
155         /* these features are needed for Win64 and aren't fully implemented */
156             CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
157         /* this feature is needed for Solaris and isn't fully implemented */
158             CPUID_PSE36,
159         .ext_features = CPUID_EXT_SSE3,
160         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
161             CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
162             CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
163         .ext3_features = CPUID_EXT3_SVM,
164         .xlevel = 0x8000000A,
166 #endif
168         .name = "qemu32",
169         .level = 2,
170         .family = 6,
171         .model = 3,
172         .stepping = 3,
173         .features = PPRO_FEATURES,
174         .ext_features = CPUID_EXT_SSE3,
175         .xlevel = 0,
178         .name = "486",
179         .level = 0,
180         .family = 4,
181         .model = 0,
182         .stepping = 0,
183         .features = I486_FEATURES,
184         .xlevel = 0,
187         .name = "pentium",
188         .level = 1,
189         .family = 5,
190         .model = 4,
191         .stepping = 3,
192         .features = PENTIUM_FEATURES,
193         .xlevel = 0,
196         .name = "pentium2",
197         .level = 2,
198         .family = 6,
199         .model = 5,
200         .stepping = 2,
201         .features = PENTIUM2_FEATURES,
202         .xlevel = 0,
205         .name = "pentium3",
206         .level = 2,
207         .family = 6,
208         .model = 7,
209         .stepping = 3,
210         .features = PENTIUM3_FEATURES,
211         .xlevel = 0,
214         .name = "athlon",
215         .level = 2,
216         .vendor1 = 0x68747541, /* "Auth" */
217         .vendor2 = 0x69746e65, /* "enti" */
218         .vendor3 = 0x444d4163, /* "cAMD" */
219         .family = 6,
220         .model = 2,
221         .stepping = 3,
/* NOTE(review): PPRO_FEATURES is OR-ed in twice below - harmless
 * (bitwise OR is idempotent) but redundant. */
222         .features = PPRO_FEATURES | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
223         .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
224         .xlevel = 0x80000008,
/* Resolve a "-cpu" string ("name[,+flag][,-flag][,prop=value]...")
 * into *x86_cpu_def: copy the named base model, then apply +/- feature
 * flags and family/model/stepping overrides.  Returns 0 on success,
 * -1 on unknown model name or malformed feature string.
 * NOTE(review): uses strtok(), so this is not reentrant.
 * NOTE(review): the "x86_cpu_def = 0;" assignments on the error paths
 * only clear the local pointer copy - they have no effect for the
 * caller and look like leftovers. */
228 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
230     unsigned int i;
231     x86_def_t *def;
233     char *s = strdup(cpu_model);
234     char *featurestr, *name = strtok(s, ",");
235     uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
236     uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
237     int family = -1, model = -1, stepping = -1;
239     def = NULL;
240     for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
241         if (strcmp(name, x86_defs[i].name) == 0) {
242             def = &x86_defs[i];
243             break;
246     if (!def)
247         goto error;
248     memcpy(x86_cpu_def, def, sizeof(*def));
250     featurestr = strtok(NULL, ",");
252     while (featurestr) {
253         char *val;
254         if (featurestr[0] == '+') {
255             add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
256         } else if (featurestr[0] == '-') {
257             add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
258         } else if ((val = strchr(featurestr, '='))) {
259             *val = 0; val++;
260             if (!strcmp(featurestr, "family")) {
261                 char *err;
262                 family = strtol(val, &err, 10);
263                 if (!*val || *err || family < 0) {
264                     fprintf(stderr, "bad numerical value %s\n", val);
265                     x86_cpu_def = 0;
266                     goto error;
268                 x86_cpu_def->family = family;
269             } else if (!strcmp(featurestr, "model")) {
270                 char *err;
271                 model = strtol(val, &err, 10);
272                 if (!*val || *err || model < 0 || model > 0xf) {
273                     fprintf(stderr, "bad numerical value %s\n", val);
274                     x86_cpu_def = 0;
275                     goto error;
277                 x86_cpu_def->model = model;
278             } else if (!strcmp(featurestr, "stepping")) {
279                 char *err;
280                 stepping = strtol(val, &err, 10);
281                 if (!*val || *err || stepping < 0 || stepping > 0xf) {
282                     fprintf(stderr, "bad numerical value %s\n", val);
283                     x86_cpu_def = 0;
284                     goto error;
286                 x86_cpu_def->stepping = stepping;
287             } else {
288                 fprintf(stderr, "unrecognized feature %s\n", featurestr);
289                 x86_cpu_def = 0;
290                 goto error;
292         } else {
293             fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
294             x86_cpu_def = 0;
295             goto error;
297         featurestr = strtok(NULL, ",");
/* Apply all "+" flags first, then mask out all "-" flags, so "-"
 * always wins over "+" for the same feature. */
299     x86_cpu_def->features |= plus_features;
300     x86_cpu_def->ext_features |= plus_ext_features;
301     x86_cpu_def->ext2_features |= plus_ext2_features;
302     x86_cpu_def->ext3_features |= plus_ext3_features;
303     x86_cpu_def->features &= ~minus_features;
304     x86_cpu_def->ext_features &= ~minus_ext_features;
305     x86_cpu_def->ext2_features &= ~minus_ext2_features;
306     x86_cpu_def->ext3_features &= ~minus_ext3_features;
307     free(s);
308     return 0;
310 error:
311     free(s);
312     return -1;
315 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
317 unsigned int i;
319 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
320 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
/* Configure env's CPUID state from the given model string: vendor
 * words (GenuineIntel if the model defines none), version word
 * (family/model/stepping), feature bitmaps, CPUID levels, and the
 * 48-byte brand string.  Returns 0 on success, -1 on unknown model. */
323 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
325     x86_def_t def1, *def = &def1;
327     if (cpu_x86_find_by_name(def, cpu_model) < 0)
328         return -1;
329     if (def->vendor1) {
330         env->cpuid_vendor1 = def->vendor1;
331         env->cpuid_vendor2 = def->vendor2;
332         env->cpuid_vendor3 = def->vendor3;
333     } else {
334         env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
335         env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
336         env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
338     env->cpuid_level = def->level;
339     env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
340     env->cpuid_features = def->features;
341     env->pat = 0x0007040600070406ULL;
342     env->cpuid_ext_features = def->ext_features;
343     env->cpuid_ext2_features = def->ext2_features;
344     env->cpuid_xlevel = def->xlevel;
345     env->cpuid_ext3_features = def->ext3_features;
/* Pack the NUL-padded brand string into the twelve 32-bit cpuid_model
 * words, four ASCII characters per word, little-endian. */
347         const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
348         int c, len, i;
349         len = strlen(model_id);
350         for(i = 0; i < 48; i++) {
351             if (i >= len)
352                 c = '\0';
353             else
354                 c = model_id[i];
355             env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
358     return 0;
361 /* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on state: real mode,
 * CS=F000:FFF0 with base 0xFFFF0000, all segments 64 KB, FPU/SSE
 * control words at their documented reset values.  Everything up to
 * the breakpoints field is zeroed first, so CPUID state (which lives
 * before that offset is assumed - TODO confirm against cpu.h layout)
 * survives only via the memset bound. */
362 void cpu_reset(CPUX86State *env)
364     int i;
366     memset(env, 0, offsetof(CPUX86State, breakpoints));
368     tlb_flush(env, 1);
370     env->old_exception = -1;
372     /* init to reset state */
374 #ifdef CONFIG_SOFTMMU
375     env->hflags |= HF_SOFTMMU_MASK;
376 #endif
377     env->hflags |= HF_GIF_MASK;
379     cpu_x86_update_cr0(env, 0x60000010);
380     env->a20_mask = ~0x0;
381     env->smbase = 0x30000;
383     env->idt.limit = 0xffff;
384     env->gdt.limit = 0xffff;
385     env->ldt.limit = 0xffff;
386     env->ldt.flags = DESC_P_MASK;
387     env->tr.limit = 0xffff;
388     env->tr.flags = DESC_P_MASK;
390     cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
391     cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
392     cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
393     cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
394     cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
395     cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
397     env->eip = 0xfff0;
/* EDX holds the CPU signature after reset, as on real hardware. */
398     env->regs[R_EDX] = env->cpuid_version;
400     env->eflags = 0x2;
402     /* FPU init */
/* All FP tag bits set to 1 = all x87 registers empty. */
403     for(i = 0;i < 8; i++)
404         env->fptags[i] = 1;
405     env->fpuc = 0x37f;
407     env->mxcsr = 0x1f80;
/* Release the CPU state allocated by cpu_x86_init(). */
410 void cpu_x86_close(CPUX86State *env)
412     free(env);
415 /***********************************************************/
416 /* x86 debug */
/* Printable names for the lazy condition-code operation states used
 * by cpu_dump_state().  NOTE(review): index order must stay in sync
 * with the CC_OP_* enum in cpu.h - confirm when editing. */
418 static const char *cc_op_str[] = {
419     "DYNAMIC",
420     "EFLAGS",
422     "MULB",
423     "MULW",
424     "MULL",
425     "MULQ",
427     "ADDB",
428     "ADDW",
429     "ADDL",
430     "ADDQ",
432     "ADCB",
433     "ADCW",
434     "ADCL",
435     "ADCQ",
437     "SUBB",
438     "SUBW",
439     "SUBL",
440     "SUBQ",
442     "SBBB",
443     "SBBW",
444     "SBBL",
445     "SBBQ",
447     "LOGICB",
448     "LOGICW",
449     "LOGICL",
450     "LOGICQ",
452     "INCB",
453     "INCW",
454     "INCL",
455     "INCQ",
457     "DECB",
458     "DECW",
459     "DECL",
460     "DECQ",
462     "SHLB",
463     "SHLW",
464     "SHLL",
465     "SHLQ",
467     "SARB",
468     "SARW",
469     "SARL",
470     "SARQ",
/* Dump the full CPU register state to f via the fprintf-like
 * callback: general registers, flags, segment/descriptor caches and
 * control registers; optionally (per the flags argument) the lazy
 * condition-code state (X86_DUMP_CCOP) and FPU/SSE registers
 * (X86_DUMP_FPU).  Uses 64-bit formats when the CPU is currently in
 * long/64-bit mode (HF_CS64/HF_LMA), 32-bit formats otherwise. */
473 void cpu_dump_state(CPUState *env, FILE *f,
474                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
475                     int flags)
477     int eflags, i, nb;
478     char cc_op_name[32];
479     static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
481     eflags = env->eflags;
482 #ifdef TARGET_X86_64
483     if (env->hflags & HF_CS64_MASK) {
484         cpu_fprintf(f,
485                     "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
486                     "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
487                     "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
488                     "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
489                     "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
490                     env->regs[R_EAX],
491                     env->regs[R_EBX],
492                     env->regs[R_ECX],
493                     env->regs[R_EDX],
494                     env->regs[R_ESI],
495                     env->regs[R_EDI],
496                     env->regs[R_EBP],
497                     env->regs[R_ESP],
498                     env->regs[8],
499                     env->regs[9],
500                     env->regs[10],
501                     env->regs[11],
502                     env->regs[12],
503                     env->regs[13],
504                     env->regs[14],
505                     env->regs[15],
506                     env->eip, eflags,
507                     eflags & DF_MASK ? 'D' : '-',
508                     eflags & CC_O ? 'O' : '-',
509                     eflags & CC_S ? 'S' : '-',
510                     eflags & CC_Z ? 'Z' : '-',
511                     eflags & CC_A ? 'A' : '-',
512                     eflags & CC_P ? 'P' : '-',
513                     eflags & CC_C ? 'C' : '-',
514                     env->hflags & HF_CPL_MASK,
515                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
516                     (env->a20_mask >> 20) & 1,
517                     (env->hflags >> HF_SMM_SHIFT) & 1,
518                     (env->hflags >> HF_HALTED_SHIFT) & 1);
519     } else
520 #endif
522         cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
523                     "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
524                     "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
525                     (uint32_t)env->regs[R_EAX],
526                     (uint32_t)env->regs[R_EBX],
527                     (uint32_t)env->regs[R_ECX],
528                     (uint32_t)env->regs[R_EDX],
529                     (uint32_t)env->regs[R_ESI],
530                     (uint32_t)env->regs[R_EDI],
531                     (uint32_t)env->regs[R_EBP],
532                     (uint32_t)env->regs[R_ESP],
533                     (uint32_t)env->eip, eflags,
534                     eflags & DF_MASK ? 'D' : '-',
535                     eflags & CC_O ? 'O' : '-',
536                     eflags & CC_S ? 'S' : '-',
537                     eflags & CC_Z ? 'Z' : '-',
538                     eflags & CC_A ? 'A' : '-',
539                     eflags & CC_P ? 'P' : '-',
540                     eflags & CC_C ? 'C' : '-',
541                     env->hflags & HF_CPL_MASK,
542                     (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
543                     (env->a20_mask >> 20) & 1,
544                     (env->hflags >> HF_SMM_SHIFT) & 1,
545                     (env->hflags >> HF_HALTED_SHIFT) & 1);
/* Segment and descriptor-table caches: 64-bit bases in long mode,
 * 32-bit bases otherwise. */
548 #ifdef TARGET_X86_64
549     if (env->hflags & HF_LMA_MASK) {
550         for(i = 0; i < 6; i++) {
551             SegmentCache *sc = &env->segs[i];
552             cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
553                         seg_name[i],
554                         sc->selector,
555                         sc->base,
556                         sc->limit,
557                         sc->flags);
559         cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
560                     env->ldt.selector,
561                     env->ldt.base,
562                     env->ldt.limit,
563                     env->ldt.flags);
564         cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
565                     env->tr.selector,
566                     env->tr.base,
567                     env->tr.limit,
568                     env->tr.flags);
569         cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
570                     env->gdt.base, env->gdt.limit);
571         cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
572                     env->idt.base, env->idt.limit);
573         cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
574                     (uint32_t)env->cr[0],
575                     env->cr[2],
576                     env->cr[3],
577                     (uint32_t)env->cr[4]);
578     } else
579 #endif
581         for(i = 0; i < 6; i++) {
582             SegmentCache *sc = &env->segs[i];
583             cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
584                         seg_name[i],
585                         sc->selector,
586                         (uint32_t)sc->base,
587                         sc->limit,
588                         sc->flags);
590         cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
591                     env->ldt.selector,
592                     (uint32_t)env->ldt.base,
593                     env->ldt.limit,
594                     env->ldt.flags);
595         cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
596                     env->tr.selector,
597                     (uint32_t)env->tr.base,
598                     env->tr.limit,
599                     env->tr.flags);
600         cpu_fprintf(f, "GDT=     %08x %08x\n",
601                     (uint32_t)env->gdt.base, env->gdt.limit);
602         cpu_fprintf(f, "IDT=     %08x %08x\n",
603                     (uint32_t)env->idt.base, env->idt.limit);
604         cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
605                     (uint32_t)env->cr[0],
606                     (uint32_t)env->cr[2],
607                     (uint32_t)env->cr[3],
608                     (uint32_t)env->cr[4]);
610     if (flags & X86_DUMP_CCOP) {
611         if ((unsigned)env->cc_op < CC_OP_NB)
612             snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
613         else
614             snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
615 #ifdef TARGET_X86_64
616         if (env->hflags & HF_CS64_MASK) {
617             cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
618                         env->cc_src, env->cc_dst,
619                         cc_op_name);
620         } else
621 #endif
623             cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
624                         (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
625                         cc_op_name);
628     if (flags & X86_DUMP_FPU) {
629         int fptag;
630         fptag = 0;
/* Rebuild the packed 8-bit FTW from the per-register fptags
 * (1 = empty), inverting each bit. */
631         for(i = 0; i < 8; i++) {
632             fptag |= ((!env->fptags[i]) << i);
634         cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
635                     env->fpuc,
636                     (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
637                     env->fpstt,
638                     fptag,
639                     env->mxcsr);
640         for(i=0;i<8;i++) {
641 #if defined(USE_X86LDOUBLE)
642             union {
643                 long double d;
644                 struct {
645                     uint64_t lower;
646                     uint16_t upper;
647                 } l;
648             } tmp;
649             tmp.d = env->fpregs[i].d;
650             cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
651                         i, tmp.l.lower, tmp.l.upper);
652 #else
653             cpu_fprintf(f, "FPR%d=%016" PRIx64,
654                         i, env->fpregs[i].mmx.q);
655 #endif
656             if ((i & 1) == 1)
657                 cpu_fprintf(f, "\n");
658             else
659                 cpu_fprintf(f, " ");
/* 16 XMM registers in 64-bit mode, 8 otherwise. */
661         if (env->hflags & HF_CS64_MASK)
662             nb = 16;
663         else
664             nb = 8;
665         for(i=0;i<nb;i++) {
666             cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
668                         env->xmm_regs[i].XMM_L(3),
669                         env->xmm_regs[i].XMM_L(2),
670                         env->xmm_regs[i].XMM_L(1),
671                         env->xmm_regs[i].XMM_L(0));
672             if ((i & 1) == 1)
673                 cpu_fprintf(f, "\n");
674             else
675                 cpu_fprintf(f, " ");
680 /***********************************************************/
681 /* x86 mmu */
682 /* XXX: add PGE support */
/* Update the A20 address-line gate.  A change invalidates every
 * cached translation, so the running TB chain is broken and the
 * entire TLB is flushed before the new address mask is installed. */
684 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
686     a20_state = (a20_state != 0);
687     if (a20_state != ((env->a20_mask >> 20) & 1)) {
688 #if defined(DEBUG_MMU)
689         printf("A20 update: a20=%d\n", a20_state);
690 #endif
691         /* if the cpu is currently executing code, we must unlink it and
692            all the potentially executing TB */
693         cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
695         /* when a20 is changed, all the MMU mappings are invalid, so
696            we must flush everything */
697         tlb_flush(env, 1);
698         env->a20_mask = (~0x100000) | (a20_state << 20);
/* Install a new CR0 value: flush the TLB when any paging-relevant bit
 * (PG/WP/PE) changes, handle long-mode entry/exit via EFER.LMA when
 * paging is toggled with EFER.LME set, and recompute the cached
 * hflags bits (PE, ADDSEG, MP/EM/TS). */
702 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
704     int pe_state;
706 #if defined(DEBUG_MMU)
707     printf("CR0 update: CR0=0x%08x\n", new_cr0);
708 #endif
709     if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
710         (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
711         tlb_flush(env, 1);
714 #ifdef TARGET_X86_64
715     if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
716         (env->efer & MSR_EFER_LME)) {
717         /* enter in long mode */
718         /* XXX: generate an exception */
719         if (!(env->cr[4] & CR4_PAE_MASK))
720             return;
721         env->efer |= MSR_EFER_LMA;
722         env->hflags |= HF_LMA_MASK;
723     } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
724                (env->efer & MSR_EFER_LMA)) {
725         /* exit long mode */
726         env->efer &= ~MSR_EFER_LMA;
727         env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
728         env->eip &= 0xffffffff;
730 #endif
/* ET is architecturally hardwired to 1. */
731     env->cr[0] = new_cr0 | CR0_ET_MASK;
733     /* update PE flag in hidden flags */
734     pe_state = (env->cr[0] & CR0_PE_MASK);
735     env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
736     /* ensure that ADDSEG is always set in real mode */
737     env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
738     /* update FPU flags */
/* MP/EM/TS occupy adjacent hflags bits; a single shift copies all
 * three from CR0 at once. */
739     env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
740         ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
743 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
744    the PDPT */
/* Install a new page-table base.  A non-global TLB flush is only
 * needed while paging is enabled. */
745 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
747     env->cr[3] = new_cr3;
748     if (env->cr[0] & CR0_PG_MASK) {
749 #if defined(DEBUG_MMU)
750         printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
751 #endif
752         tlb_flush(env, 0);
/* Install a new CR4 value: flush the TLB when a paging-mode bit
 * (PGE/PAE/PSE) changes, force OSFXSR off when the CPU model lacks
 * SSE, and mirror OSFXSR into the cached hflags. */
756 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
758 #if defined(DEBUG_MMU)
759     printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
760 #endif
761     if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
762         (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
763         tlb_flush(env, 1);
765     /* SSE handling */
766     if (!(env->cpuid_features & CPUID_SSE))
767         new_cr4 &= ~CR4_OSFXSR_MASK;
768     if (new_cr4 & CR4_OSFXSR_MASK)
769         env->hflags |= HF_OSFXSR_MASK;
770     else
771         env->hflags &= ~HF_OSFXSR_MASK;
773     env->cr[4] = new_cr4;
776 /* XXX: also flush 4MB pages */
/* Invalidate the cached translation for a single page (INVLPG). */
777 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
779     tlb_flush_page(env, addr);
782 #if defined(CONFIG_USER_ONLY)
/* User-mode-only variant: there is no MMU to emulate, so every fault
 * is reported as a user-level #PF with CR2 and the error code set.
 * Always returns 1 (generate page fault). */
784 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
785                              int is_write, int mmu_idx, int is_softmmu)
787     /* user mode only emulation */
788     is_write &= 1;
789     env->cr[2] = addr;
790     env->error_code = (is_write << PG_ERROR_W_BIT);
791     env->error_code |= PG_ERROR_U_MASK;
792     env->exception_index = EXCP0E_PAGE;
793     return 1;
/* User-mode-only variant: addresses are identity-mapped. */
796 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
798     return addr;
801 #else
803 /* XXX: This value should match the one returned by CPUID
804  * and in exec.c */
/* Mask of physical-address bits taken from a page-table entry
 * (entry bits 12 and up, width depending on target/kqemu config). */
805 #if defined(USE_KQEMU)
806 #define PHYS_ADDR_MASK 0xfffff000L
807 #else
808 # if defined(TARGET_X86_64)
809 # define PHYS_ADDR_MASK 0xfffffff000L
810 # else
811 # define PHYS_ADDR_MASK 0xffffff000L
812 # endif
813 #endif
815 /* return value:
816 -1 = cannot handle fault
817 0 = nothing more to do
818 1 = generate PF fault
819 2 = soft MMU activation required for this block
/* Software-MMU page walk for a faulting access.  Walks the paging
 * structures for the current mode (no paging / legacy 32-bit / PAE /
 * long mode), updating Accessed and Dirty bits as it goes, and either
 * installs the translation in the QEMU TLB or records a #PF.
 * is_write1 encodes the access type: 0 = read, 1 = write, 2 = ifetch
 * (code fetch is checked against NX).  Return values are documented
 * in the comment above this function. */
821 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
822                              int is_write1, int mmu_idx, int is_softmmu)
824     uint64_t ptep, pte;
825     target_ulong pde_addr, pte_addr;
826     int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
827     target_phys_addr_t paddr;
828     uint32_t page_offset;
829     target_ulong vaddr, virt_addr;
831     is_user = mmu_idx == MMU_USER_IDX;
832 #if defined(DEBUG_MMU)
833     printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
834            addr, is_write1, is_user, env->eip);
835 #endif
836     is_write = is_write1 & 1;
838     if (!(env->cr[0] & CR0_PG_MASK)) {
/* Paging disabled: identity map with full permissions. */
839         pte = addr;
840         virt_addr = addr & TARGET_PAGE_MASK;
841         prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
842         page_size = 4096;
843         goto do_mapping;
846     if (env->cr[4] & CR4_PAE_MASK) {
847         uint64_t pde, pdpe;
848         target_ulong pdpe_addr;
850 #ifdef TARGET_X86_64
851         if (env->hflags & HF_LMA_MASK) {
852             uint64_t pml4e_addr, pml4e;
853             int32_t sext;
855             /* test virtual address sign extension */
/* Non-canonical addresses raise #GP, not #PF. */
856             sext = (int64_t)addr >> 47;
857             if (sext != 0 && sext != -1) {
858                 env->error_code = 0;
859                 env->exception_index = EXCP0D_GPF;
860                 return 1;
863             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
864                 env->a20_mask;
865             pml4e = ldq_phys(pml4e_addr);
866             if (!(pml4e & PG_PRESENT_MASK)) {
867                 error_code = 0;
868                 goto do_fault;
870             if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
871                 error_code = PG_ERROR_RSVD_MASK;
872                 goto do_fault;
874             if (!(pml4e & PG_ACCESSED_MASK)) {
875                 pml4e |= PG_ACCESSED_MASK;
876                 stl_phys_notdirty(pml4e_addr, pml4e);
/* ptep accumulates permissions across levels.  NX is inverted
 * (XOR) so that it can be AND-combined like USER/RW: the final
 * XOR back restores its real sense. */
878             ptep = pml4e ^ PG_NX_MASK;
879             pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
880                 env->a20_mask;
881             pdpe = ldq_phys(pdpe_addr);
882             if (!(pdpe & PG_PRESENT_MASK)) {
883                 error_code = 0;
884                 goto do_fault;
886             if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
887                 error_code = PG_ERROR_RSVD_MASK;
888                 goto do_fault;
890             ptep &= pdpe ^ PG_NX_MASK;
891             if (!(pdpe & PG_ACCESSED_MASK)) {
892                 pdpe |= PG_ACCESSED_MASK;
893                 stl_phys_notdirty(pdpe_addr, pdpe);
895         } else
896 #endif
898             /* XXX: load them when cr3 is loaded ? */
/* Legacy PAE: 4-entry PDPT; PDPEs carry no permission bits, so
 * start ptep fully permissive. */
899             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
900                 env->a20_mask;
901             pdpe = ldq_phys(pdpe_addr);
902             if (!(pdpe & PG_PRESENT_MASK)) {
903                 error_code = 0;
904                 goto do_fault;
906             ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
909         pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
910             env->a20_mask;
911         pde = ldq_phys(pde_addr);
912         if (!(pde & PG_PRESENT_MASK)) {
913             error_code = 0;
914             goto do_fault;
916         if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
917             error_code = PG_ERROR_RSVD_MASK;
918             goto do_fault;
920         ptep &= pde ^ PG_NX_MASK;
921         if (pde & PG_PSE_MASK) {
922             /* 2 MB page */
923             page_size = 2048 * 1024;
924             ptep ^= PG_NX_MASK;
925             if ((ptep & PG_NX_MASK) && is_write1 == 2)
926                 goto do_fault_protect;
927             if (is_user) {
928                 if (!(ptep & PG_USER_MASK))
929                     goto do_fault_protect;
930                 if (is_write && !(ptep & PG_RW_MASK))
931                     goto do_fault_protect;
932             } else {
933                 if ((env->cr[0] & CR0_WP_MASK) &&
934                     is_write && !(ptep & PG_RW_MASK))
935                     goto do_fault_protect;
937             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
938             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
939                 pde |= PG_ACCESSED_MASK;
940                 if (is_dirty)
941                     pde |= PG_DIRTY_MASK;
942                 stl_phys_notdirty(pde_addr, pde);
944             /* align to page_size */
945             pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
946             virt_addr = addr & ~(page_size - 1);
947         } else {
948             /* 4 KB page */
949             if (!(pde & PG_ACCESSED_MASK)) {
950                 pde |= PG_ACCESSED_MASK;
951                 stl_phys_notdirty(pde_addr, pde);
953             pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
954                 env->a20_mask;
955             pte = ldq_phys(pte_addr);
956             if (!(pte & PG_PRESENT_MASK)) {
957                 error_code = 0;
958                 goto do_fault;
960             if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
961                 error_code = PG_ERROR_RSVD_MASK;
962                 goto do_fault;
964             /* combine pde and pte nx, user and rw protections */
965             ptep &= pte ^ PG_NX_MASK;
966             ptep ^= PG_NX_MASK;
967             if ((ptep & PG_NX_MASK) && is_write1 == 2)
968                 goto do_fault_protect;
969             if (is_user) {
970                 if (!(ptep & PG_USER_MASK))
971                     goto do_fault_protect;
972                 if (is_write && !(ptep & PG_RW_MASK))
973                     goto do_fault_protect;
974             } else {
975                 if ((env->cr[0] & CR0_WP_MASK) &&
976                     is_write && !(ptep & PG_RW_MASK))
977                     goto do_fault_protect;
979             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
980             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
981                 pte |= PG_ACCESSED_MASK;
982                 if (is_dirty)
983                     pte |= PG_DIRTY_MASK;
984                 stl_phys_notdirty(pte_addr, pte);
986             page_size = 4096;
987             virt_addr = addr & ~0xfff;
988             pte = pte & (PHYS_ADDR_MASK | 0xfff);
990     } else {
/* Legacy 32-bit (non-PAE) two-level walk; no NX support here. */
991         uint32_t pde;
993         /* page directory entry */
994         pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
995             env->a20_mask;
996         pde = ldl_phys(pde_addr);
997         if (!(pde & PG_PRESENT_MASK)) {
998             error_code = 0;
999             goto do_fault;
1001         /* if PSE bit is set, then we use a 4MB page */
1002         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1003             page_size = 4096 * 1024;
1004             if (is_user) {
1005                 if (!(pde & PG_USER_MASK))
1006                     goto do_fault_protect;
1007                 if (is_write && !(pde & PG_RW_MASK))
1008                     goto do_fault_protect;
1009             } else {
1010                 if ((env->cr[0] & CR0_WP_MASK) &&
1011                     is_write && !(pde & PG_RW_MASK))
1012                     goto do_fault_protect;
1014             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1015             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1016                 pde |= PG_ACCESSED_MASK;
1017                 if (is_dirty)
1018                     pde |= PG_DIRTY_MASK;
1019                 stl_phys_notdirty(pde_addr, pde);
1022             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1023             ptep = pte;
1024             virt_addr = addr & ~(page_size - 1);
1025         } else {
1026             if (!(pde & PG_ACCESSED_MASK)) {
1027                 pde |= PG_ACCESSED_MASK;
1028                 stl_phys_notdirty(pde_addr, pde);
1031             /* page directory entry */
1032             pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1033                 env->a20_mask;
1034             pte = ldl_phys(pte_addr);
1035             if (!(pte & PG_PRESENT_MASK)) {
1036                 error_code = 0;
1037                 goto do_fault;
1039             /* combine pde and pte user and rw protections */
1040             ptep = pte & pde;
1041             if (is_user) {
1042                 if (!(ptep & PG_USER_MASK))
1043                     goto do_fault_protect;
1044                 if (is_write && !(ptep & PG_RW_MASK))
1045                     goto do_fault_protect;
1046             } else {
1047                 if ((env->cr[0] & CR0_WP_MASK) &&
1048                     is_write && !(ptep & PG_RW_MASK))
1049                     goto do_fault_protect;
1051             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1052             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1053                 pte |= PG_ACCESSED_MASK;
1054                 if (is_dirty)
1055                     pte |= PG_DIRTY_MASK;
1056                 stl_phys_notdirty(pte_addr, pte);
1058             page_size = 4096;
1059             virt_addr = addr & ~0xfff;
1062     /* the page can be put in the TLB */
1063     prot = PAGE_READ;
1064     if (!(ptep & PG_NX_MASK))
1065         prot |= PAGE_EXEC;
1066     if (pte & PG_DIRTY_MASK) {
1067         /* only set write access if already dirty... otherwise wait
1068            for dirty access */
1069         if (is_user) {
1070             if (ptep & PG_RW_MASK)
1071                 prot |= PAGE_WRITE;
1072         } else {
1073             if (!(env->cr[0] & CR0_WP_MASK) ||
1074                 (ptep & PG_RW_MASK))
1075                 prot |= PAGE_WRITE;
1078  do_mapping:
1079     pte = pte & env->a20_mask;
1081     /* Even if 4MB pages, we map only one 4KB page in the cache to
1082        avoid filling it too fast */
1083     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1084     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1085     vaddr = virt_addr + page_offset;
1087     ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1088     return ret;
1089  do_fault_protect:
1090     error_code = PG_ERROR_P_MASK;
1091  do_fault:
1092     error_code |= (is_write << PG_ERROR_W_BIT);
1093     if (is_user)
1094         error_code |= PG_ERROR_U_MASK;
1095     if (is_write1 == 2 &&
1096         (env->efer & MSR_EFER_NXE) &&
1097         (env->cr[4] & CR4_PAE_MASK))
1098         error_code |= PG_ERROR_I_D_MASK;
/* SVM: if the guest's VMM intercepts #PF, report the faulting
 * address in the VMCB instead of clobbering guest CR2. */
1099     if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1100         stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1101     } else {
1102         env->cr[2] = addr;
1104     env->error_code = error_code;
1105     env->exception_index = EXCP0E_PAGE;
1106     /* the VMM will handle this */
1107     if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1108         return 2;
1109     return 1;
/* Debugger (gdbstub) translation: walk the page tables for the
 * current paging mode and return the physical address for addr, or
 * -1 if unmapped.  Unlike the fault handler this performs no
 * permission checks and never modifies Accessed/Dirty bits. */
1112 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1114     target_ulong pde_addr, pte_addr;
1115     uint64_t pte;
1116     target_phys_addr_t paddr;
1117     uint32_t page_offset;
1118     int page_size;
1120     if (env->cr[4] & CR4_PAE_MASK) {
1121         target_ulong pdpe_addr;
1122         uint64_t pde, pdpe;
1124 #ifdef TARGET_X86_64
1125         if (env->hflags & HF_LMA_MASK) {
1126             uint64_t pml4e_addr, pml4e;
1127             int32_t sext;
1129             /* test virtual address sign extension */
1130             sext = (int64_t)addr >> 47;
1131             if (sext != 0 && sext != -1)
1132                 return -1;
1134             pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1135                 env->a20_mask;
1136             pml4e = ldq_phys(pml4e_addr);
1137             if (!(pml4e & PG_PRESENT_MASK))
1138                 return -1;
1140             pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1141                 env->a20_mask;
1142             pdpe = ldq_phys(pdpe_addr);
1143             if (!(pdpe & PG_PRESENT_MASK))
1144                 return -1;
1145         } else
1146 #endif
1148             pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1149                 env->a20_mask;
1150             pdpe = ldq_phys(pdpe_addr);
1151             if (!(pdpe & PG_PRESENT_MASK))
1152                 return -1;
1155         pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1156             env->a20_mask;
1157         pde = ldq_phys(pde_addr);
1158         if (!(pde & PG_PRESENT_MASK)) {
1159             return -1;
1161         if (pde & PG_PSE_MASK) {
1162             /* 2 MB page */
1163             page_size = 2048 * 1024;
1164             pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1165         } else {
1166             /* 4 KB page */
1167             pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1168                 env->a20_mask;
1169             page_size = 4096;
1170             pte = ldq_phys(pte_addr);
1172     } else {
1173         uint32_t pde;
1175         if (!(env->cr[0] & CR0_PG_MASK)) {
/* Paging disabled: identity mapping. */
1176             pte = addr;
1177             page_size = 4096;
1178         } else {
1179             /* page directory entry */
1180             pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1181             pde = ldl_phys(pde_addr);
1182             if (!(pde & PG_PRESENT_MASK))
1183                 return -1;
1184             if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1185                 pte = pde & ~0x003ff000; /* align to 4MB */
1186                 page_size = 4096 * 1024;
1187             } else {
1188                 /* page directory entry */
1189                 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1190                 pte = ldl_phys(pte_addr);
1191                 if (!(pte & PG_PRESENT_MASK))
1192                     return -1;
1193                 page_size = 4096;
1196         pte = pte & env->a20_mask;
1199     page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1200     paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1201     return paddr;
1203 #endif /* !CONFIG_USER_ONLY */