Fix x86 feature modifications for features that set multiple bits
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob 1433857dae5699c425ac0532252f80de5a7b4452
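The fix is visible in add_flagname_to_bitmaps() below: each flag name given with "+" or "-" on the command line is matched against all four CPUID feature-name tables, so a name that appears in more than one table (for example "mmx", listed in both the standard and the AMD extended word) sets or clears every corresponding bit.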
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
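/* Look the flag name up in all four tables and set the bit in every table
 * where it appears: names such as "fpu" or "mmx" exist in both the standard
 * and the AMD extended feature word, so a single "+name"/"-name" option must
 * modify more than one bit. */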
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
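/* Cumulative baseline feature sets for the built-in models below: each
 * PENTIUM* macro extends the previous generation, while PPRO_FEATURES is
 * the independent P6 baseline used by most of the entries. */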
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
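/* Parse a -cpu option string of the form
 *   name[,+flag][,-flag][,prop=value]...
 * e.g. "qemu64,+sse3,-nx,family=15": the leading name selects one of the
 * x86_defs[] entries above, "+"/"-" items set or clear feature bits, and
 * key=value items override family/model/stepping/vendor/model_id. */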
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
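/* Translate the selected x86_def_t into CPUID register state.  The version
 * word packs stepping (bits 0-3), model (bits 4-7, extended model in bits
 * 16-19) and family (bits 8-11, with families above 15 encoded through the
 * extended family field in bits 20-27). */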
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */
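/* Printable names for the lazy condition-code states; indexed by
 * env->cc_op, so the order must match the CC_OP_* enumeration in cpu.h. */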
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* cast: env->dr is target_ulong, which may be 64 bits wide here */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
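/* env->a20_mask is ANDed into every physical address the MMU code
 * generates; bit 20 of the mask mirrors the state of the A20 gate. */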
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
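/* Walk the page tables like the handler above, but read-only: no
 * accessed/dirty bits are written back and no fault state is raised,
 * which makes it safe for the debugger/gdbstub path. */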
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
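/* DR7 encodes a type for each of the four debug registers: 0 is an
 * instruction breakpoint, 1 a data-write watchpoint, 2 an I/O breakpoint
 * (unsupported here) and 3 a data read/write watchpoint. */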
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
#endif /* !CONFIG_USER_ONLY */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
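/* host_cpuid() executes the CPUID instruction on the host; it is only
 * compiled in under CONFIG_KVM, where its results are used below to
 * adjust guest-visible leaves to what the host can actually provide. */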
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}