Fix interrupt exclusion via SSTEP_NOIRQ
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob 4b8c5037983664e758bb13108a152c01b5a2aa1d
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
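/* Example: add_flagname_to_bitmaps("pni", &f, &ext_f, &ext2_f, &ext3_f)
   sets bit 0 of ext_f (SSE3, reported in CPUID leaf 1 ECX below), while
   an unknown name only prints the warning on stderr and leaves all four
   bitmaps untouched. */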
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for (i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
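/* Example: cpu_x86_find_by_name(&def, "qemu64,+pni,-nx,family=6,vendor=AuthenticAMD,model_id=My CPU")
   starts from the "qemu64" definition, sets the SSE3 (pni) ext feature bit,
   clears the NX ext2 feature bit, and overrides the family, vendor and
   model string.  An unknown model name or a malformed token returns -1. */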
void x86_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for (i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
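/* cpuid_version packs family/model/stepping exactly as CPUID leaf 1 EAX
   expects: family 6, model 2, stepping 3 yields
   (6 << 8) | (2 << 4) | 3 = 0x623. */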
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
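/* The CS load and EIP above reproduce the x86 reset vector: selector 0xf000
   with cached base 0xffff0000 plus EIP 0xfff0 puts the first instruction
   fetch at physical address 0xfffffff0. */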
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
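/* a20_mask is all-ones when the gate is enabled and clears only bit 20 when
   it is disabled; with A20 off, address 0x100000 is masked back to 0, which
   mimics the 8086 one-megabyte wrap-around. */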
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
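/* CR0.MP, CR0.EM and CR0.TS occupy bits 1-3, so the single shift by
   HF_MP_SHIFT - 1 lines bit 1 (MP) up with its hflags counterpart and the
   mask then copies all three FPU flags in one operation. */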
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
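/* The long-mode walk above slices the virtual address into 9-bit table
   indices plus a 12-bit page offset:
       PML4 index = addr[47:39], PDPT index = addr[38:30],
       PD index   = addr[29:21], PT index   = addr[20:12]
   which is where the ">> 39", ">> 30", ">> 21" and ">> 12" extractions
   with the 0x1ff masks come from. */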
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(CONFIG_KVM)
static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%1) \n\t"
                 "mov %%ebx, 4(%1) \n\t"
                 "mov %%ecx, 8(%1) \n\t"
                 "mov %%edx, 12(%1) \n\t"
                 "popa"
                 : : "a"(function), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
#endif
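/* On 32-bit hosts the cpuid is bracketed with pusha/popa so every GPR is
   restored afterwards; gcc may reserve %ebx as the PIC register on i386,
   in which case it cannot simply be listed as a clobber. */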
void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (*ecx) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
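/* Callers receive the vendor string in the customary EBX, EDX, ECX order:
   index 0 with the default Intel vendor yields "GenuineIntel" when the
   three registers are concatenated. */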