Add statics and missing #includes for prototypes.
[qemu/mini2440.git] / target-i386 / helper2.c
blobb46475e3027f12d0bb529156af3d5c4009c01138
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
32 //#define DEBUG_MMU
34 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
/* Look up a CPUID feature-flag name and set the corresponding bit in one of
 * the four feature bitmaps (standard EDX, standard ECX, extended EDX,
 * extended ECX).  The first table containing the name wins.  Unknown names
 * are reported on stderr and ignored.
 *
 * Bug fixes vs. previous revision:
 *  - the ext3 loop tested ext3_features[i] (an out-of-bounds read through
 *    the output pointer) instead of ext3_feature_name[i];
 *  - ext2 table entry "mttr" corrected to "mtrr".
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
93 CPUX86State *cpu_x86_init(const char *cpu_model)
95 CPUX86State *env;
96 static int inited;
98 env = qemu_mallocz(sizeof(CPUX86State));
99 if (!env)
100 return NULL;
101 cpu_exec_init(env);
103 /* init various static tables */
104 if (!inited) {
105 inited = 1;
106 optimize_flags_init();
108 if (cpu_x86_register(env, cpu_model) < 0) {
109 cpu_x86_close(env);
110 return NULL;
112 cpu_reset(env);
113 #ifdef USE_KQEMU
114 kqemu_init(env);
115 #endif
116 return env;
/* Static description of one built-in CPU model: CPUID identification
 * (vendor string as three little-endian dwords, family/model/stepping)
 * plus the four feature bitmaps and the highest extended CPUID level. */
typedef struct x86_def_t {
    const char *name;
    uint32_t vendor1, vendor2, vendor3;  /* CPUID.0 EBX/EDX/ECX vendor id */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* max extended CPUID leaf (0x8000xxxx) */
} x86_def_t;
129 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
130 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
131 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
132 CPUID_PAE | CPUID_SEP | CPUID_APIC)
133 static x86_def_t x86_defs[] = {
134 #ifdef TARGET_X86_64
136 .name = "qemu64",
137 .vendor1 = 0x68747541, /* "Auth" */
138 .vendor2 = 0x69746e65, /* "enti" */
139 .vendor3 = 0x444d4163, /* "cAMD" */
140 .family = 6,
141 .model = 2,
142 .stepping = 3,
143 .features = PPRO_FEATURES |
144 /* these features are needed for Win64 and aren't fully implemented */
145 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
146 /* this feature is needed for Solaris and isn't fully implemented */
147 CPUID_PSE36,
148 .ext_features = CPUID_EXT_SSE3,
149 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
150 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
151 .ext3_features = CPUID_EXT3_SVM,
152 .xlevel = 0x80000008,
154 #endif
156 .name = "qemu32",
157 .family = 6,
158 .model = 3,
159 .stepping = 3,
160 .features = PPRO_FEATURES,
161 .ext_features = CPUID_EXT_SSE3,
162 .xlevel = 0,
165 .name = "486",
166 .family = 4,
167 .model = 0,
168 .stepping = 0,
169 .features = 0x0000000B,
170 .xlevel = 0,
173 .name = "pentium",
174 .family = 5,
175 .model = 4,
176 .stepping = 3,
177 .features = 0x008001BF,
178 .xlevel = 0,
181 .name = "pentium2",
182 .family = 6,
183 .model = 5,
184 .stepping = 2,
185 .features = 0x0183F9FF,
186 .xlevel = 0,
189 .name = "pentium3",
190 .family = 6,
191 .model = 7,
192 .stepping = 3,
193 .features = 0x0383F9FF,
194 .xlevel = 0,
198 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
200 unsigned int i;
201 x86_def_t *def;
203 char *s = strdup(cpu_model);
204 char *featurestr, *name = strtok(s, ",");
205 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
206 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
207 int family = -1, model = -1, stepping = -1;
209 def = NULL;
210 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
211 if (strcmp(name, x86_defs[i].name) == 0) {
212 def = &x86_defs[i];
213 break;
216 if (!def)
217 goto error;
218 memcpy(x86_cpu_def, def, sizeof(*def));
220 featurestr = strtok(NULL, ",");
222 while (featurestr) {
223 char *val;
224 if (featurestr[0] == '+') {
225 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
226 } else if (featurestr[0] == '-') {
227 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
228 } else if ((val = strchr(featurestr, '='))) {
229 *val = 0; val++;
230 if (!strcmp(featurestr, "family")) {
231 char *err;
232 family = strtol(val, &err, 10);
233 if (!*val || *err || family < 0) {
234 fprintf(stderr, "bad numerical value %s\n", val);
235 x86_cpu_def = 0;
236 goto error;
238 x86_cpu_def->family = family;
239 } else if (!strcmp(featurestr, "model")) {
240 char *err;
241 model = strtol(val, &err, 10);
242 if (!*val || *err || model < 0 || model > 0xf) {
243 fprintf(stderr, "bad numerical value %s\n", val);
244 x86_cpu_def = 0;
245 goto error;
247 x86_cpu_def->model = model;
248 } else if (!strcmp(featurestr, "stepping")) {
249 char *err;
250 stepping = strtol(val, &err, 10);
251 if (!*val || *err || stepping < 0 || stepping > 0xf) {
252 fprintf(stderr, "bad numerical value %s\n", val);
253 x86_cpu_def = 0;
254 goto error;
256 x86_cpu_def->stepping = stepping;
257 } else {
258 fprintf(stderr, "unregnized feature %s\n", featurestr);
259 x86_cpu_def = 0;
260 goto error;
262 } else {
263 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
264 x86_cpu_def = 0;
265 goto error;
267 featurestr = strtok(NULL, ",");
269 x86_cpu_def->features |= plus_features;
270 x86_cpu_def->ext_features |= plus_ext_features;
271 x86_cpu_def->ext2_features |= plus_ext2_features;
272 x86_cpu_def->ext3_features |= plus_ext3_features;
273 x86_cpu_def->features &= ~minus_features;
274 x86_cpu_def->ext_features &= ~minus_ext_features;
275 x86_cpu_def->ext2_features &= ~minus_ext2_features;
276 x86_cpu_def->ext3_features &= ~minus_ext3_features;
277 free(s);
278 return 0;
280 error:
281 free(s);
282 return -1;
285 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
287 unsigned int i;
289 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
290 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
293 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
295 x86_def_t def1, *def = &def1;
297 if (cpu_x86_find_by_name(def, cpu_model) < 0)
298 return -1;
299 if (def->vendor1) {
300 env->cpuid_vendor1 = def->vendor1;
301 env->cpuid_vendor2 = def->vendor2;
302 env->cpuid_vendor3 = def->vendor3;
303 } else {
304 env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
305 env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
306 env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
308 env->cpuid_level = 2;
309 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
310 env->cpuid_features = def->features;
311 env->pat = 0x0007040600070406ULL;
312 env->cpuid_ext_features = def->ext_features;
313 env->cpuid_ext2_features = def->ext2_features;
314 env->cpuid_xlevel = def->xlevel;
315 env->cpuid_ext3_features = def->ext3_features;
317 const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
318 int c, len, i;
319 len = strlen(model_id);
320 for(i = 0; i < 48; i++) {
321 if (i >= len)
322 c = '\0';
323 else
324 c = model_id[i];
325 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
328 return 0;
331 /* NOTE: must be called outside the CPU execute loop */
332 void cpu_reset(CPUX86State *env)
334 int i;
336 memset(env, 0, offsetof(CPUX86State, breakpoints));
338 tlb_flush(env, 1);
340 env->old_exception = -1;
342 /* init to reset state */
344 #ifdef CONFIG_SOFTMMU
345 env->hflags |= HF_SOFTMMU_MASK;
346 #endif
347 env->hflags |= HF_GIF_MASK;
349 cpu_x86_update_cr0(env, 0x60000010);
350 env->a20_mask = 0xffffffff;
351 env->smbase = 0x30000;
353 env->idt.limit = 0xffff;
354 env->gdt.limit = 0xffff;
355 env->ldt.limit = 0xffff;
356 env->ldt.flags = DESC_P_MASK;
357 env->tr.limit = 0xffff;
358 env->tr.flags = DESC_P_MASK;
360 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
361 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
362 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
363 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
364 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
365 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
367 env->eip = 0xfff0;
368 env->regs[R_EDX] = env->cpuid_version;
370 env->eflags = 0x2;
372 /* FPU init */
373 for(i = 0;i < 8; i++)
374 env->fptags[i] = 1;
375 env->fpuc = 0x37f;
377 env->mxcsr = 0x1f80;
380 void cpu_x86_close(CPUX86State *env)
382 free(env);
385 /***********************************************************/
386 /* x86 debug */
/* Printable names for the lazy condition-code ops; the order must match
 * the CC_OP_* enum in cpu.h (indexed by env->cc_op in cpu_dump_state). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
443 void cpu_dump_state(CPUState *env, FILE *f,
444 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
445 int flags)
447 int eflags, i, nb;
448 char cc_op_name[32];
449 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
451 eflags = env->eflags;
452 #ifdef TARGET_X86_64
453 if (env->hflags & HF_CS64_MASK) {
454 cpu_fprintf(f,
455 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
456 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
457 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
458 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
459 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
460 env->regs[R_EAX],
461 env->regs[R_EBX],
462 env->regs[R_ECX],
463 env->regs[R_EDX],
464 env->regs[R_ESI],
465 env->regs[R_EDI],
466 env->regs[R_EBP],
467 env->regs[R_ESP],
468 env->regs[8],
469 env->regs[9],
470 env->regs[10],
471 env->regs[11],
472 env->regs[12],
473 env->regs[13],
474 env->regs[14],
475 env->regs[15],
476 env->eip, eflags,
477 eflags & DF_MASK ? 'D' : '-',
478 eflags & CC_O ? 'O' : '-',
479 eflags & CC_S ? 'S' : '-',
480 eflags & CC_Z ? 'Z' : '-',
481 eflags & CC_A ? 'A' : '-',
482 eflags & CC_P ? 'P' : '-',
483 eflags & CC_C ? 'C' : '-',
484 env->hflags & HF_CPL_MASK,
485 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
486 (env->a20_mask >> 20) & 1,
487 (env->hflags >> HF_SMM_SHIFT) & 1,
488 (env->hflags >> HF_HALTED_SHIFT) & 1);
489 } else
490 #endif
492 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
493 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
494 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
495 (uint32_t)env->regs[R_EAX],
496 (uint32_t)env->regs[R_EBX],
497 (uint32_t)env->regs[R_ECX],
498 (uint32_t)env->regs[R_EDX],
499 (uint32_t)env->regs[R_ESI],
500 (uint32_t)env->regs[R_EDI],
501 (uint32_t)env->regs[R_EBP],
502 (uint32_t)env->regs[R_ESP],
503 (uint32_t)env->eip, eflags,
504 eflags & DF_MASK ? 'D' : '-',
505 eflags & CC_O ? 'O' : '-',
506 eflags & CC_S ? 'S' : '-',
507 eflags & CC_Z ? 'Z' : '-',
508 eflags & CC_A ? 'A' : '-',
509 eflags & CC_P ? 'P' : '-',
510 eflags & CC_C ? 'C' : '-',
511 env->hflags & HF_CPL_MASK,
512 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
513 (env->a20_mask >> 20) & 1,
514 (env->hflags >> HF_SMM_SHIFT) & 1,
515 (env->hflags >> HF_HALTED_SHIFT) & 1);
518 #ifdef TARGET_X86_64
519 if (env->hflags & HF_LMA_MASK) {
520 for(i = 0; i < 6; i++) {
521 SegmentCache *sc = &env->segs[i];
522 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
523 seg_name[i],
524 sc->selector,
525 sc->base,
526 sc->limit,
527 sc->flags);
529 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
530 env->ldt.selector,
531 env->ldt.base,
532 env->ldt.limit,
533 env->ldt.flags);
534 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
535 env->tr.selector,
536 env->tr.base,
537 env->tr.limit,
538 env->tr.flags);
539 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
540 env->gdt.base, env->gdt.limit);
541 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
542 env->idt.base, env->idt.limit);
543 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
544 (uint32_t)env->cr[0],
545 env->cr[2],
546 env->cr[3],
547 (uint32_t)env->cr[4]);
548 } else
549 #endif
551 for(i = 0; i < 6; i++) {
552 SegmentCache *sc = &env->segs[i];
553 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
554 seg_name[i],
555 sc->selector,
556 (uint32_t)sc->base,
557 sc->limit,
558 sc->flags);
560 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
561 env->ldt.selector,
562 (uint32_t)env->ldt.base,
563 env->ldt.limit,
564 env->ldt.flags);
565 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
566 env->tr.selector,
567 (uint32_t)env->tr.base,
568 env->tr.limit,
569 env->tr.flags);
570 cpu_fprintf(f, "GDT= %08x %08x\n",
571 (uint32_t)env->gdt.base, env->gdt.limit);
572 cpu_fprintf(f, "IDT= %08x %08x\n",
573 (uint32_t)env->idt.base, env->idt.limit);
574 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
575 (uint32_t)env->cr[0],
576 (uint32_t)env->cr[2],
577 (uint32_t)env->cr[3],
578 (uint32_t)env->cr[4]);
580 if (flags & X86_DUMP_CCOP) {
581 if ((unsigned)env->cc_op < CC_OP_NB)
582 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
583 else
584 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
585 #ifdef TARGET_X86_64
586 if (env->hflags & HF_CS64_MASK) {
587 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
588 env->cc_src, env->cc_dst,
589 cc_op_name);
590 } else
591 #endif
593 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
594 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
595 cc_op_name);
598 if (flags & X86_DUMP_FPU) {
599 int fptag;
600 fptag = 0;
601 for(i = 0; i < 8; i++) {
602 fptag |= ((!env->fptags[i]) << i);
604 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
605 env->fpuc,
606 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
607 env->fpstt,
608 fptag,
609 env->mxcsr);
610 for(i=0;i<8;i++) {
611 #if defined(USE_X86LDOUBLE)
612 union {
613 long double d;
614 struct {
615 uint64_t lower;
616 uint16_t upper;
617 } l;
618 } tmp;
619 tmp.d = env->fpregs[i].d;
620 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
621 i, tmp.l.lower, tmp.l.upper);
622 #else
623 cpu_fprintf(f, "FPR%d=%016" PRIx64,
624 i, env->fpregs[i].mmx.q);
625 #endif
626 if ((i & 1) == 1)
627 cpu_fprintf(f, "\n");
628 else
629 cpu_fprintf(f, " ");
631 if (env->hflags & HF_CS64_MASK)
632 nb = 16;
633 else
634 nb = 8;
635 for(i=0;i<nb;i++) {
636 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
638 env->xmm_regs[i].XMM_L(3),
639 env->xmm_regs[i].XMM_L(2),
640 env->xmm_regs[i].XMM_L(1),
641 env->xmm_regs[i].XMM_L(0));
642 if ((i & 1) == 1)
643 cpu_fprintf(f, "\n");
644 else
645 cpu_fprintf(f, " ");
650 /***********************************************************/
651 /* x86 mmu */
652 /* XXX: add PGE support */
654 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
656 a20_state = (a20_state != 0);
657 if (a20_state != ((env->a20_mask >> 20) & 1)) {
658 #if defined(DEBUG_MMU)
659 printf("A20 update: a20=%d\n", a20_state);
660 #endif
661 /* if the cpu is currently executing code, we must unlink it and
662 all the potentially executing TB */
663 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
665 /* when a20 is changed, all the MMU mappings are invalid, so
666 we must flush everything */
667 tlb_flush(env, 1);
668 env->a20_mask = 0xffefffff | (a20_state << 20);
672 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
674 int pe_state;
676 #if defined(DEBUG_MMU)
677 printf("CR0 update: CR0=0x%08x\n", new_cr0);
678 #endif
679 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
680 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
681 tlb_flush(env, 1);
684 #ifdef TARGET_X86_64
685 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
686 (env->efer & MSR_EFER_LME)) {
687 /* enter in long mode */
688 /* XXX: generate an exception */
689 if (!(env->cr[4] & CR4_PAE_MASK))
690 return;
691 env->efer |= MSR_EFER_LMA;
692 env->hflags |= HF_LMA_MASK;
693 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
694 (env->efer & MSR_EFER_LMA)) {
695 /* exit long mode */
696 env->efer &= ~MSR_EFER_LMA;
697 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
698 env->eip &= 0xffffffff;
700 #endif
701 env->cr[0] = new_cr0 | CR0_ET_MASK;
703 /* update PE flag in hidden flags */
704 pe_state = (env->cr[0] & CR0_PE_MASK);
705 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
706 /* ensure that ADDSEG is always set in real mode */
707 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
708 /* update FPU flags */
709 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
710 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
713 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
714 the PDPT */
715 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
717 env->cr[3] = new_cr3;
718 if (env->cr[0] & CR0_PG_MASK) {
719 #if defined(DEBUG_MMU)
720 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
721 #endif
722 tlb_flush(env, 0);
726 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
728 #if defined(DEBUG_MMU)
729 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
730 #endif
731 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
732 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
733 tlb_flush(env, 1);
735 /* SSE handling */
736 if (!(env->cpuid_features & CPUID_SSE))
737 new_cr4 &= ~CR4_OSFXSR_MASK;
738 if (new_cr4 & CR4_OSFXSR_MASK)
739 env->hflags |= HF_OSFXSR_MASK;
740 else
741 env->hflags &= ~HF_OSFXSR_MASK;
743 env->cr[4] = new_cr4;
746 /* XXX: also flush 4MB pages */
747 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
749 tlb_flush_page(env, addr);
752 #if defined(CONFIG_USER_ONLY)
754 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
755 int is_write, int mmu_idx, int is_softmmu)
757 /* user mode only emulation */
758 is_write &= 1;
759 env->cr[2] = addr;
760 env->error_code = (is_write << PG_ERROR_W_BIT);
761 env->error_code |= PG_ERROR_U_MASK;
762 env->exception_index = EXCP0E_PAGE;
763 return 1;
766 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
768 return addr;
771 #else
773 #define PHYS_ADDR_MASK 0xfffff000
775 /* return value:
776 -1 = cannot handle fault
777 0 = nothing more to do
778 1 = generate PF fault
779 2 = soft MMU activation required for this block
781 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
782 int is_write1, int mmu_idx, int is_softmmu)
784 uint64_t ptep, pte;
785 uint32_t pdpe_addr, pde_addr, pte_addr;
786 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
787 unsigned long paddr, page_offset;
788 target_ulong vaddr, virt_addr;
790 is_user = mmu_idx == MMU_USER_IDX;
791 #if defined(DEBUG_MMU)
792 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
793 addr, is_write1, is_user, env->eip);
794 #endif
795 is_write = is_write1 & 1;
797 if (!(env->cr[0] & CR0_PG_MASK)) {
798 pte = addr;
799 virt_addr = addr & TARGET_PAGE_MASK;
800 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
801 page_size = 4096;
802 goto do_mapping;
805 if (env->cr[4] & CR4_PAE_MASK) {
806 uint64_t pde, pdpe;
808 /* XXX: we only use 32 bit physical addresses */
809 #ifdef TARGET_X86_64
810 if (env->hflags & HF_LMA_MASK) {
811 uint32_t pml4e_addr;
812 uint64_t pml4e;
813 int32_t sext;
815 /* test virtual address sign extension */
816 sext = (int64_t)addr >> 47;
817 if (sext != 0 && sext != -1) {
818 env->error_code = 0;
819 env->exception_index = EXCP0D_GPF;
820 return 1;
823 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
824 env->a20_mask;
825 pml4e = ldq_phys(pml4e_addr);
826 if (!(pml4e & PG_PRESENT_MASK)) {
827 error_code = 0;
828 goto do_fault;
830 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
831 error_code = PG_ERROR_RSVD_MASK;
832 goto do_fault;
834 if (!(pml4e & PG_ACCESSED_MASK)) {
835 pml4e |= PG_ACCESSED_MASK;
836 stl_phys_notdirty(pml4e_addr, pml4e);
838 ptep = pml4e ^ PG_NX_MASK;
839 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
840 env->a20_mask;
841 pdpe = ldq_phys(pdpe_addr);
842 if (!(pdpe & PG_PRESENT_MASK)) {
843 error_code = 0;
844 goto do_fault;
846 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
847 error_code = PG_ERROR_RSVD_MASK;
848 goto do_fault;
850 ptep &= pdpe ^ PG_NX_MASK;
851 if (!(pdpe & PG_ACCESSED_MASK)) {
852 pdpe |= PG_ACCESSED_MASK;
853 stl_phys_notdirty(pdpe_addr, pdpe);
855 } else
856 #endif
858 /* XXX: load them when cr3 is loaded ? */
859 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
860 env->a20_mask;
861 pdpe = ldq_phys(pdpe_addr);
862 if (!(pdpe & PG_PRESENT_MASK)) {
863 error_code = 0;
864 goto do_fault;
866 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
869 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
870 env->a20_mask;
871 pde = ldq_phys(pde_addr);
872 if (!(pde & PG_PRESENT_MASK)) {
873 error_code = 0;
874 goto do_fault;
876 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
877 error_code = PG_ERROR_RSVD_MASK;
878 goto do_fault;
880 ptep &= pde ^ PG_NX_MASK;
881 if (pde & PG_PSE_MASK) {
882 /* 2 MB page */
883 page_size = 2048 * 1024;
884 ptep ^= PG_NX_MASK;
885 if ((ptep & PG_NX_MASK) && is_write1 == 2)
886 goto do_fault_protect;
887 if (is_user) {
888 if (!(ptep & PG_USER_MASK))
889 goto do_fault_protect;
890 if (is_write && !(ptep & PG_RW_MASK))
891 goto do_fault_protect;
892 } else {
893 if ((env->cr[0] & CR0_WP_MASK) &&
894 is_write && !(ptep & PG_RW_MASK))
895 goto do_fault_protect;
897 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
898 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
899 pde |= PG_ACCESSED_MASK;
900 if (is_dirty)
901 pde |= PG_DIRTY_MASK;
902 stl_phys_notdirty(pde_addr, pde);
904 /* align to page_size */
905 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
906 virt_addr = addr & ~(page_size - 1);
907 } else {
908 /* 4 KB page */
909 if (!(pde & PG_ACCESSED_MASK)) {
910 pde |= PG_ACCESSED_MASK;
911 stl_phys_notdirty(pde_addr, pde);
913 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
914 env->a20_mask;
915 pte = ldq_phys(pte_addr);
916 if (!(pte & PG_PRESENT_MASK)) {
917 error_code = 0;
918 goto do_fault;
920 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
921 error_code = PG_ERROR_RSVD_MASK;
922 goto do_fault;
924 /* combine pde and pte nx, user and rw protections */
925 ptep &= pte ^ PG_NX_MASK;
926 ptep ^= PG_NX_MASK;
927 if ((ptep & PG_NX_MASK) && is_write1 == 2)
928 goto do_fault_protect;
929 if (is_user) {
930 if (!(ptep & PG_USER_MASK))
931 goto do_fault_protect;
932 if (is_write && !(ptep & PG_RW_MASK))
933 goto do_fault_protect;
934 } else {
935 if ((env->cr[0] & CR0_WP_MASK) &&
936 is_write && !(ptep & PG_RW_MASK))
937 goto do_fault_protect;
939 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
940 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
941 pte |= PG_ACCESSED_MASK;
942 if (is_dirty)
943 pte |= PG_DIRTY_MASK;
944 stl_phys_notdirty(pte_addr, pte);
946 page_size = 4096;
947 virt_addr = addr & ~0xfff;
948 pte = pte & (PHYS_ADDR_MASK | 0xfff);
950 } else {
951 uint32_t pde;
953 /* page directory entry */
954 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
955 env->a20_mask;
956 pde = ldl_phys(pde_addr);
957 if (!(pde & PG_PRESENT_MASK)) {
958 error_code = 0;
959 goto do_fault;
961 /* if PSE bit is set, then we use a 4MB page */
962 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
963 page_size = 4096 * 1024;
964 if (is_user) {
965 if (!(pde & PG_USER_MASK))
966 goto do_fault_protect;
967 if (is_write && !(pde & PG_RW_MASK))
968 goto do_fault_protect;
969 } else {
970 if ((env->cr[0] & CR0_WP_MASK) &&
971 is_write && !(pde & PG_RW_MASK))
972 goto do_fault_protect;
974 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
975 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
976 pde |= PG_ACCESSED_MASK;
977 if (is_dirty)
978 pde |= PG_DIRTY_MASK;
979 stl_phys_notdirty(pde_addr, pde);
982 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
983 ptep = pte;
984 virt_addr = addr & ~(page_size - 1);
985 } else {
986 if (!(pde & PG_ACCESSED_MASK)) {
987 pde |= PG_ACCESSED_MASK;
988 stl_phys_notdirty(pde_addr, pde);
991 /* page directory entry */
992 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
993 env->a20_mask;
994 pte = ldl_phys(pte_addr);
995 if (!(pte & PG_PRESENT_MASK)) {
996 error_code = 0;
997 goto do_fault;
999 /* combine pde and pte user and rw protections */
1000 ptep = pte & pde;
1001 if (is_user) {
1002 if (!(ptep & PG_USER_MASK))
1003 goto do_fault_protect;
1004 if (is_write && !(ptep & PG_RW_MASK))
1005 goto do_fault_protect;
1006 } else {
1007 if ((env->cr[0] & CR0_WP_MASK) &&
1008 is_write && !(ptep & PG_RW_MASK))
1009 goto do_fault_protect;
1011 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1012 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1013 pte |= PG_ACCESSED_MASK;
1014 if (is_dirty)
1015 pte |= PG_DIRTY_MASK;
1016 stl_phys_notdirty(pte_addr, pte);
1018 page_size = 4096;
1019 virt_addr = addr & ~0xfff;
1022 /* the page can be put in the TLB */
1023 prot = PAGE_READ;
1024 if (!(ptep & PG_NX_MASK))
1025 prot |= PAGE_EXEC;
1026 if (pte & PG_DIRTY_MASK) {
1027 /* only set write access if already dirty... otherwise wait
1028 for dirty access */
1029 if (is_user) {
1030 if (ptep & PG_RW_MASK)
1031 prot |= PAGE_WRITE;
1032 } else {
1033 if (!(env->cr[0] & CR0_WP_MASK) ||
1034 (ptep & PG_RW_MASK))
1035 prot |= PAGE_WRITE;
1038 do_mapping:
1039 pte = pte & env->a20_mask;
1041 /* Even if 4MB pages, we map only one 4KB page in the cache to
1042 avoid filling it too fast */
1043 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1044 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1045 vaddr = virt_addr + page_offset;
1047 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1048 return ret;
1049 do_fault_protect:
1050 error_code = PG_ERROR_P_MASK;
1051 do_fault:
1052 error_code |= (is_write << PG_ERROR_W_BIT);
1053 if (is_user)
1054 error_code |= PG_ERROR_U_MASK;
1055 if (is_write1 == 2 &&
1056 (env->efer & MSR_EFER_NXE) &&
1057 (env->cr[4] & CR4_PAE_MASK))
1058 error_code |= PG_ERROR_I_D_MASK;
1059 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
1060 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
1061 } else {
1062 env->cr[2] = addr;
1064 env->error_code = error_code;
1065 env->exception_index = EXCP0E_PAGE;
1066 /* the VMM will handle this */
1067 if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
1068 return 2;
1069 return 1;
1072 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1074 uint32_t pde_addr, pte_addr;
1075 uint32_t pde, pte, paddr, page_offset, page_size;
1077 if (env->cr[4] & CR4_PAE_MASK) {
1078 uint32_t pdpe_addr, pde_addr, pte_addr;
1079 uint32_t pdpe;
1081 /* XXX: we only use 32 bit physical addresses */
1082 #ifdef TARGET_X86_64
1083 if (env->hflags & HF_LMA_MASK) {
1084 uint32_t pml4e_addr, pml4e;
1085 int32_t sext;
1087 /* test virtual address sign extension */
1088 sext = (int64_t)addr >> 47;
1089 if (sext != 0 && sext != -1)
1090 return -1;
1092 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1093 env->a20_mask;
1094 pml4e = ldl_phys(pml4e_addr);
1095 if (!(pml4e & PG_PRESENT_MASK))
1096 return -1;
1098 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1099 env->a20_mask;
1100 pdpe = ldl_phys(pdpe_addr);
1101 if (!(pdpe & PG_PRESENT_MASK))
1102 return -1;
1103 } else
1104 #endif
1106 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1107 env->a20_mask;
1108 pdpe = ldl_phys(pdpe_addr);
1109 if (!(pdpe & PG_PRESENT_MASK))
1110 return -1;
1113 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1114 env->a20_mask;
1115 pde = ldl_phys(pde_addr);
1116 if (!(pde & PG_PRESENT_MASK)) {
1117 return -1;
1119 if (pde & PG_PSE_MASK) {
1120 /* 2 MB page */
1121 page_size = 2048 * 1024;
1122 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1123 } else {
1124 /* 4 KB page */
1125 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1126 env->a20_mask;
1127 page_size = 4096;
1128 pte = ldl_phys(pte_addr);
1130 } else {
1131 if (!(env->cr[0] & CR0_PG_MASK)) {
1132 pte = addr;
1133 page_size = 4096;
1134 } else {
1135 /* page directory entry */
1136 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1137 pde = ldl_phys(pde_addr);
1138 if (!(pde & PG_PRESENT_MASK))
1139 return -1;
1140 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1141 pte = pde & ~0x003ff000; /* align to 4MB */
1142 page_size = 4096 * 1024;
1143 } else {
1144 /* page directory entry */
1145 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1146 pte = ldl_phys(pte_addr);
1147 if (!(pte & PG_PRESENT_MASK))
1148 return -1;
1149 page_size = 4096;
1152 pte = pte & env->a20_mask;
1155 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1156 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1157 return paddr;
1159 #endif /* !CONFIG_USER_ONLY */