Fix x86 feature modifications for features that set multiple bits
[qemu/kraxel.git] / target-i386 / helper.c
blob0c9113398e2120e98824b0e8572ff3219c9faf59
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 //#define DEBUG_MMU
/* Feature-flag name tables, one per CPUID feature register, indexed by
 * bit position (32 entries each, NULL for reserved/unnamed bits).
 * Names are taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification"; where the vendors
 * disagree, the Linux /proc/cpuinfo name is used. */

/* CPUID.1:EDX — standard features */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

/* CPUID.1:ECX — extended standard features */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* CPUID.80000001H:EDX — AMD extended features (overlaps the standard set) */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};

/* CPUID.80000001H:ECX — more AMD extended features */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
63 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
64 uint32_t *ext_features,
65 uint32_t *ext2_features,
66 uint32_t *ext3_features)
68 int i;
69 int found = 0;
71 for ( i = 0 ; i < 32 ; i++ )
72 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
73 *features |= 1 << i;
74 found = 1;
76 for ( i = 0 ; i < 32 ; i++ )
77 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
78 *ext_features |= 1 << i;
79 found = 1;
81 for ( i = 0 ; i < 32 ; i++ )
82 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
83 *ext2_features |= 1 << i;
84 found = 1;
86 for ( i = 0 ; i < 32 ; i++ )
87 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
88 *ext3_features |= 1 << i;
89 found = 1;
91 if (!found) {
92 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* Template describing one built-in CPU model (see x86_defs[]). */
typedef struct x86_def_t {
    const char *name;                    /* model name used by -cpu lookup */
    uint32_t level;                      /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* CPUID.0 vendor id words; 0 => default vendor */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* highest extended CPUID leaf */
    char model_id[48];                   /* CPUID brand string */
} x86_def_t;
/* Cumulative feature sets of successive Intel CPU generations; each
 * builds on its predecessor.  PPRO_FEATURES is the common baseline used
 * by most of the built-in models below. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
119 static x86_def_t x86_defs[] = {
120 #ifdef TARGET_X86_64
122 .name = "qemu64",
123 .level = 2,
124 .vendor1 = CPUID_VENDOR_AMD_1,
125 .vendor2 = CPUID_VENDOR_AMD_2,
126 .vendor3 = CPUID_VENDOR_AMD_3,
127 .family = 6,
128 .model = 2,
129 .stepping = 3,
130 .features = PPRO_FEATURES |
131 /* these features are needed for Win64 and aren't fully implemented */
132 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
133 /* this feature is needed for Solaris and isn't fully implemented */
134 CPUID_PSE36,
135 .ext_features = CPUID_EXT_SSE3,
136 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
137 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
138 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
139 .ext3_features = CPUID_EXT3_SVM,
140 .xlevel = 0x8000000A,
141 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
144 .name = "phenom",
145 .level = 5,
146 .vendor1 = CPUID_VENDOR_AMD_1,
147 .vendor2 = CPUID_VENDOR_AMD_2,
148 .vendor3 = CPUID_VENDOR_AMD_3,
149 .family = 16,
150 .model = 2,
151 .stepping = 3,
152 /* Missing: CPUID_VME, CPUID_HT */
153 .features = PPRO_FEATURES |
154 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
155 CPUID_PSE36,
156 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
157 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
158 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
159 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
160 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
161 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
162 CPUID_EXT2_FFXSR,
163 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
164 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
165 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
166 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
167 .ext3_features = CPUID_EXT3_SVM,
168 .xlevel = 0x8000001A,
169 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
172 .name = "core2duo",
173 .level = 10,
174 .family = 6,
175 .model = 15,
176 .stepping = 11,
177 /* The original CPU also implements these features:
178 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
179 CPUID_TM, CPUID_PBE */
180 .features = PPRO_FEATURES |
181 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
182 CPUID_PSE36,
183 /* The original CPU also implements these ext features:
184 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
185 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
186 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
187 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
188 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
189 .xlevel = 0x80000008,
190 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
192 #endif
194 .name = "qemu32",
195 .level = 2,
196 .family = 6,
197 .model = 3,
198 .stepping = 3,
199 .features = PPRO_FEATURES,
200 .ext_features = CPUID_EXT_SSE3,
201 .xlevel = 0,
202 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
205 .name = "coreduo",
206 .level = 10,
207 .family = 6,
208 .model = 14,
209 .stepping = 8,
210 /* The original CPU also implements these features:
211 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
212 CPUID_TM, CPUID_PBE */
213 .features = PPRO_FEATURES | CPUID_VME |
214 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
215 /* The original CPU also implements these ext features:
216 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
217 CPUID_EXT_PDCM */
218 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
219 .ext2_features = CPUID_EXT2_NX,
220 .xlevel = 0x80000008,
221 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
224 .name = "486",
225 .level = 0,
226 .family = 4,
227 .model = 0,
228 .stepping = 0,
229 .features = I486_FEATURES,
230 .xlevel = 0,
233 .name = "pentium",
234 .level = 1,
235 .family = 5,
236 .model = 4,
237 .stepping = 3,
238 .features = PENTIUM_FEATURES,
239 .xlevel = 0,
242 .name = "pentium2",
243 .level = 2,
244 .family = 6,
245 .model = 5,
246 .stepping = 2,
247 .features = PENTIUM2_FEATURES,
248 .xlevel = 0,
251 .name = "pentium3",
252 .level = 2,
253 .family = 6,
254 .model = 7,
255 .stepping = 3,
256 .features = PENTIUM3_FEATURES,
257 .xlevel = 0,
260 .name = "athlon",
261 .level = 2,
262 .vendor1 = 0x68747541, /* "Auth" */
263 .vendor2 = 0x69746e65, /* "enti" */
264 .vendor3 = 0x444d4163, /* "cAMD" */
265 .family = 6,
266 .model = 2,
267 .stepping = 3,
268 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
269 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
270 .xlevel = 0x80000008,
271 /* XXX: put another string ? */
272 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
275 .name = "n270",
276 /* original is on level 10 */
277 .level = 5,
278 .family = 6,
279 .model = 28,
280 .stepping = 2,
281 .features = PPRO_FEATURES |
282 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
283 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
284 * CPUID_HT | CPUID_TM | CPUID_PBE */
285 /* Some CPUs got no CPUID_SEP */
286 .ext_features = CPUID_EXT_MONITOR |
287 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
288 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
289 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
290 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
291 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
292 .xlevel = 0x8000000A,
293 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
297 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
299 unsigned int i;
300 x86_def_t *def;
302 char *s = strdup(cpu_model);
303 char *featurestr, *name = strtok(s, ",");
304 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
305 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
306 int family = -1, model = -1, stepping = -1;
308 def = NULL;
309 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
310 if (strcmp(name, x86_defs[i].name) == 0) {
311 def = &x86_defs[i];
312 break;
315 if (!def)
316 goto error;
317 memcpy(x86_cpu_def, def, sizeof(*def));
319 featurestr = strtok(NULL, ",");
321 while (featurestr) {
322 char *val;
323 if (featurestr[0] == '+') {
324 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
325 } else if (featurestr[0] == '-') {
326 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
327 } else if ((val = strchr(featurestr, '='))) {
328 *val = 0; val++;
329 if (!strcmp(featurestr, "family")) {
330 char *err;
331 family = strtol(val, &err, 10);
332 if (!*val || *err || family < 0) {
333 fprintf(stderr, "bad numerical value %s\n", val);
334 goto error;
336 x86_cpu_def->family = family;
337 } else if (!strcmp(featurestr, "model")) {
338 char *err;
339 model = strtol(val, &err, 10);
340 if (!*val || *err || model < 0 || model > 0xff) {
341 fprintf(stderr, "bad numerical value %s\n", val);
342 goto error;
344 x86_cpu_def->model = model;
345 } else if (!strcmp(featurestr, "stepping")) {
346 char *err;
347 stepping = strtol(val, &err, 10);
348 if (!*val || *err || stepping < 0 || stepping > 0xf) {
349 fprintf(stderr, "bad numerical value %s\n", val);
350 goto error;
352 x86_cpu_def->stepping = stepping;
353 } else if (!strcmp(featurestr, "vendor")) {
354 if (strlen(val) != 12) {
355 fprintf(stderr, "vendor string must be 12 chars long\n");
356 goto error;
358 x86_cpu_def->vendor1 = 0;
359 x86_cpu_def->vendor2 = 0;
360 x86_cpu_def->vendor3 = 0;
361 for(i = 0; i < 4; i++) {
362 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
363 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
364 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
366 } else if (!strcmp(featurestr, "model_id")) {
367 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
368 val);
369 } else {
370 fprintf(stderr, "unrecognized feature %s\n", featurestr);
371 goto error;
373 } else {
374 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
375 goto error;
377 featurestr = strtok(NULL, ",");
379 x86_cpu_def->features |= plus_features;
380 x86_cpu_def->ext_features |= plus_ext_features;
381 x86_cpu_def->ext2_features |= plus_ext2_features;
382 x86_cpu_def->ext3_features |= plus_ext3_features;
383 x86_cpu_def->features &= ~minus_features;
384 x86_cpu_def->ext_features &= ~minus_ext_features;
385 x86_cpu_def->ext2_features &= ~minus_ext2_features;
386 x86_cpu_def->ext3_features &= ~minus_ext3_features;
387 free(s);
388 return 0;
390 error:
391 free(s);
392 return -1;
395 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
397 unsigned int i;
399 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
400 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
403 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
405 x86_def_t def1, *def = &def1;
407 if (cpu_x86_find_by_name(def, cpu_model) < 0)
408 return -1;
409 if (def->vendor1) {
410 env->cpuid_vendor1 = def->vendor1;
411 env->cpuid_vendor2 = def->vendor2;
412 env->cpuid_vendor3 = def->vendor3;
413 } else {
414 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
415 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
416 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
418 env->cpuid_level = def->level;
419 if (def->family > 0x0f)
420 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
421 else
422 env->cpuid_version = def->family << 8;
423 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
424 env->cpuid_version |= def->stepping;
425 env->cpuid_features = def->features;
426 env->pat = 0x0007040600070406ULL;
427 env->cpuid_ext_features = def->ext_features;
428 env->cpuid_ext2_features = def->ext2_features;
429 env->cpuid_xlevel = def->xlevel;
430 env->cpuid_ext3_features = def->ext3_features;
432 const char *model_id = def->model_id;
433 int c, len, i;
434 if (!model_id)
435 model_id = "";
436 len = strlen(model_id);
437 for(i = 0; i < 48; i++) {
438 if (i >= len)
439 c = '\0';
440 else
441 c = (uint8_t)model_id[i];
442 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
445 return 0;
448 /* NOTE: must be called outside the CPU execute loop */
449 void cpu_reset(CPUX86State *env)
451 int i;
453 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
454 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
455 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
458 memset(env, 0, offsetof(CPUX86State, breakpoints));
460 tlb_flush(env, 1);
462 env->old_exception = -1;
464 /* init to reset state */
466 #ifdef CONFIG_SOFTMMU
467 env->hflags |= HF_SOFTMMU_MASK;
468 #endif
469 env->hflags2 |= HF2_GIF_MASK;
471 cpu_x86_update_cr0(env, 0x60000010);
472 env->a20_mask = ~0x0;
473 env->smbase = 0x30000;
475 env->idt.limit = 0xffff;
476 env->gdt.limit = 0xffff;
477 env->ldt.limit = 0xffff;
478 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
479 env->tr.limit = 0xffff;
480 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
482 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
483 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
484 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
485 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
486 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
487 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
488 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
489 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
490 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
491 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
492 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
493 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
495 env->eip = 0xfff0;
496 env->regs[R_EDX] = env->cpuid_version;
498 env->eflags = 0x2;
500 /* FPU init */
501 for(i = 0;i < 8; i++)
502 env->fptags[i] = 1;
503 env->fpuc = 0x37f;
505 env->mxcsr = 0x1f80;
507 memset(env->dr, 0, sizeof(env->dr));
508 env->dr[6] = DR6_FIXED_1;
509 env->dr[7] = DR7_FIXED_1;
510 cpu_breakpoint_remove_all(env, BP_CPU);
511 cpu_watchpoint_remove_all(env, BP_CPU);
514 void cpu_x86_close(CPUX86State *env)
516 qemu_free(env);
519 /***********************************************************/
520 /* x86 debug */
/* Printable names for the lazy condition-code states, indexed by CC_OP_*;
 * after DYNAMIC and EFLAGS each operation comes in B/W/L/Q width order. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
577 static void
578 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
579 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
580 const char *name, struct SegmentCache *sc)
582 #ifdef TARGET_X86_64
583 if (env->hflags & HF_CS64_MASK) {
584 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
585 sc->selector, sc->base, sc->limit, sc->flags);
586 } else
587 #endif
589 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
590 (uint32_t)sc->base, sc->limit, sc->flags);
593 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
594 goto done;
596 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
597 if (sc->flags & DESC_S_MASK) {
598 if (sc->flags & DESC_CS_MASK) {
599 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
600 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
601 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
602 (sc->flags & DESC_R_MASK) ? 'R' : '-');
603 } else {
604 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
605 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
606 (sc->flags & DESC_W_MASK) ? 'W' : '-');
608 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
609 } else {
610 static const char *sys_type_name[2][16] = {
611 { /* 32 bit mode */
612 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
613 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
614 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
615 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
617 { /* 64 bit mode */
618 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
619 "Reserved", "Reserved", "Reserved", "Reserved",
620 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
621 "Reserved", "IntGate64", "TrapGate64"
624 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
625 [(sc->flags & DESC_TYPE_MASK)
626 >> DESC_TYPE_SHIFT]);
628 done:
629 cpu_fprintf(f, "\n");
632 void cpu_dump_state(CPUState *env, FILE *f,
633 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
634 int flags)
636 int eflags, i, nb;
637 char cc_op_name[32];
638 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
640 if (kvm_enabled())
641 kvm_arch_get_registers(env);
643 eflags = env->eflags;
644 #ifdef TARGET_X86_64
645 if (env->hflags & HF_CS64_MASK) {
646 cpu_fprintf(f,
647 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
648 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
649 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
650 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
651 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
652 env->regs[R_EAX],
653 env->regs[R_EBX],
654 env->regs[R_ECX],
655 env->regs[R_EDX],
656 env->regs[R_ESI],
657 env->regs[R_EDI],
658 env->regs[R_EBP],
659 env->regs[R_ESP],
660 env->regs[8],
661 env->regs[9],
662 env->regs[10],
663 env->regs[11],
664 env->regs[12],
665 env->regs[13],
666 env->regs[14],
667 env->regs[15],
668 env->eip, eflags,
669 eflags & DF_MASK ? 'D' : '-',
670 eflags & CC_O ? 'O' : '-',
671 eflags & CC_S ? 'S' : '-',
672 eflags & CC_Z ? 'Z' : '-',
673 eflags & CC_A ? 'A' : '-',
674 eflags & CC_P ? 'P' : '-',
675 eflags & CC_C ? 'C' : '-',
676 env->hflags & HF_CPL_MASK,
677 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
678 (int)(env->a20_mask >> 20) & 1,
679 (env->hflags >> HF_SMM_SHIFT) & 1,
680 env->halted);
681 } else
682 #endif
684 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
685 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
686 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
687 (uint32_t)env->regs[R_EAX],
688 (uint32_t)env->regs[R_EBX],
689 (uint32_t)env->regs[R_ECX],
690 (uint32_t)env->regs[R_EDX],
691 (uint32_t)env->regs[R_ESI],
692 (uint32_t)env->regs[R_EDI],
693 (uint32_t)env->regs[R_EBP],
694 (uint32_t)env->regs[R_ESP],
695 (uint32_t)env->eip, eflags,
696 eflags & DF_MASK ? 'D' : '-',
697 eflags & CC_O ? 'O' : '-',
698 eflags & CC_S ? 'S' : '-',
699 eflags & CC_Z ? 'Z' : '-',
700 eflags & CC_A ? 'A' : '-',
701 eflags & CC_P ? 'P' : '-',
702 eflags & CC_C ? 'C' : '-',
703 env->hflags & HF_CPL_MASK,
704 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
705 (int)(env->a20_mask >> 20) & 1,
706 (env->hflags >> HF_SMM_SHIFT) & 1,
707 env->halted);
710 for(i = 0; i < 6; i++) {
711 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
712 &env->segs[i]);
714 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
715 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
717 #ifdef TARGET_X86_64
718 if (env->hflags & HF_LMA_MASK) {
719 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
720 env->gdt.base, env->gdt.limit);
721 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
722 env->idt.base, env->idt.limit);
723 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
724 (uint32_t)env->cr[0],
725 env->cr[2],
726 env->cr[3],
727 (uint32_t)env->cr[4]);
728 for(i = 0; i < 4; i++)
729 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
730 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
731 env->dr[6], env->dr[7]);
732 } else
733 #endif
735 cpu_fprintf(f, "GDT= %08x %08x\n",
736 (uint32_t)env->gdt.base, env->gdt.limit);
737 cpu_fprintf(f, "IDT= %08x %08x\n",
738 (uint32_t)env->idt.base, env->idt.limit);
739 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
740 (uint32_t)env->cr[0],
741 (uint32_t)env->cr[2],
742 (uint32_t)env->cr[3],
743 (uint32_t)env->cr[4]);
744 for(i = 0; i < 4; i++)
745 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
746 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
748 if (flags & X86_DUMP_CCOP) {
749 if ((unsigned)env->cc_op < CC_OP_NB)
750 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
751 else
752 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
753 #ifdef TARGET_X86_64
754 if (env->hflags & HF_CS64_MASK) {
755 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
756 env->cc_src, env->cc_dst,
757 cc_op_name);
758 } else
759 #endif
761 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
762 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
763 cc_op_name);
766 if (flags & X86_DUMP_FPU) {
767 int fptag;
768 fptag = 0;
769 for(i = 0; i < 8; i++) {
770 fptag |= ((!env->fptags[i]) << i);
772 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
773 env->fpuc,
774 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
775 env->fpstt,
776 fptag,
777 env->mxcsr);
778 for(i=0;i<8;i++) {
779 #if defined(USE_X86LDOUBLE)
780 union {
781 long double d;
782 struct {
783 uint64_t lower;
784 uint16_t upper;
785 } l;
786 } tmp;
787 tmp.d = env->fpregs[i].d;
788 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
789 i, tmp.l.lower, tmp.l.upper);
790 #else
791 cpu_fprintf(f, "FPR%d=%016" PRIx64,
792 i, env->fpregs[i].mmx.q);
793 #endif
794 if ((i & 1) == 1)
795 cpu_fprintf(f, "\n");
796 else
797 cpu_fprintf(f, " ");
799 if (env->hflags & HF_CS64_MASK)
800 nb = 16;
801 else
802 nb = 8;
803 for(i=0;i<nb;i++) {
804 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
806 env->xmm_regs[i].XMM_L(3),
807 env->xmm_regs[i].XMM_L(2),
808 env->xmm_regs[i].XMM_L(1),
809 env->xmm_regs[i].XMM_L(0));
810 if ((i & 1) == 1)
811 cpu_fprintf(f, "\n");
812 else
813 cpu_fprintf(f, " ");
818 /***********************************************************/
819 /* x86 mmu */
820 /* XXX: add PGE support */
822 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
824 a20_state = (a20_state != 0);
825 if (a20_state != ((env->a20_mask >> 20) & 1)) {
826 #if defined(DEBUG_MMU)
827 printf("A20 update: a20=%d\n", a20_state);
828 #endif
829 /* if the cpu is currently executing code, we must unlink it and
830 all the potentially executing TB */
831 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
833 /* when a20 is changed, all the MMU mappings are invalid, so
834 we must flush everything */
835 tlb_flush(env, 1);
836 env->a20_mask = (~0x100000) | (a20_state << 20);
840 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
842 int pe_state;
844 #if defined(DEBUG_MMU)
845 printf("CR0 update: CR0=0x%08x\n", new_cr0);
846 #endif
847 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
848 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
849 tlb_flush(env, 1);
852 #ifdef TARGET_X86_64
853 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
854 (env->efer & MSR_EFER_LME)) {
855 /* enter in long mode */
856 /* XXX: generate an exception */
857 if (!(env->cr[4] & CR4_PAE_MASK))
858 return;
859 env->efer |= MSR_EFER_LMA;
860 env->hflags |= HF_LMA_MASK;
861 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
862 (env->efer & MSR_EFER_LMA)) {
863 /* exit long mode */
864 env->efer &= ~MSR_EFER_LMA;
865 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
866 env->eip &= 0xffffffff;
868 #endif
869 env->cr[0] = new_cr0 | CR0_ET_MASK;
871 /* update PE flag in hidden flags */
872 pe_state = (env->cr[0] & CR0_PE_MASK);
873 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
874 /* ensure that ADDSEG is always set in real mode */
875 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
876 /* update FPU flags */
877 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
878 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
881 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
882 the PDPT */
883 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
885 env->cr[3] = new_cr3;
886 if (env->cr[0] & CR0_PG_MASK) {
887 #if defined(DEBUG_MMU)
888 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
889 #endif
890 tlb_flush(env, 0);
894 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
896 #if defined(DEBUG_MMU)
897 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
898 #endif
899 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
900 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
901 tlb_flush(env, 1);
903 /* SSE handling */
904 if (!(env->cpuid_features & CPUID_SSE))
905 new_cr4 &= ~CR4_OSFXSR_MASK;
906 if (new_cr4 & CR4_OSFXSR_MASK)
907 env->hflags |= HF_OSFXSR_MASK;
908 else
909 env->hflags &= ~HF_OSFXSR_MASK;
911 env->cr[4] = new_cr4;
914 #if defined(CONFIG_USER_ONLY)
916 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
917 int is_write, int mmu_idx, int is_softmmu)
919 /* user mode only emulation */
920 is_write &= 1;
921 env->cr[2] = addr;
922 env->error_code = (is_write << PG_ERROR_W_BIT);
923 env->error_code |= PG_ERROR_U_MASK;
924 env->exception_index = EXCP0E_PAGE;
925 return 1;
928 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
930 return addr;
933 #else
935 /* XXX: This value should match the one returned by CPUID
936 * and in exec.c */
937 #if defined(CONFIG_KQEMU)
938 #define PHYS_ADDR_MASK 0xfffff000LL
939 #else
940 # if defined(TARGET_X86_64)
941 # define PHYS_ADDR_MASK 0xfffffff000LL
942 # else
943 # define PHYS_ADDR_MASK 0xffffff000LL
944 # endif
945 #endif
947 /* return value:
948 -1 = cannot handle fault
949 0 = nothing more to do
950 1 = generate PF fault
951 2 = soft MMU activation required for this block
953 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
954 int is_write1, int mmu_idx, int is_softmmu)
956 uint64_t ptep, pte;
957 target_ulong pde_addr, pte_addr;
958 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
959 target_phys_addr_t paddr;
960 uint32_t page_offset;
961 target_ulong vaddr, virt_addr;
963 is_user = mmu_idx == MMU_USER_IDX;
964 #if defined(DEBUG_MMU)
965 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
966 addr, is_write1, is_user, env->eip);
967 #endif
968 is_write = is_write1 & 1;
970 if (!(env->cr[0] & CR0_PG_MASK)) {
971 pte = addr;
972 virt_addr = addr & TARGET_PAGE_MASK;
973 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
974 page_size = 4096;
975 goto do_mapping;
978 if (env->cr[4] & CR4_PAE_MASK) {
979 uint64_t pde, pdpe;
980 target_ulong pdpe_addr;
982 #ifdef TARGET_X86_64
983 if (env->hflags & HF_LMA_MASK) {
984 uint64_t pml4e_addr, pml4e;
985 int32_t sext;
987 /* test virtual address sign extension */
988 sext = (int64_t)addr >> 47;
989 if (sext != 0 && sext != -1) {
990 env->error_code = 0;
991 env->exception_index = EXCP0D_GPF;
992 return 1;
995 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
996 env->a20_mask;
997 pml4e = ldq_phys(pml4e_addr);
998 if (!(pml4e & PG_PRESENT_MASK)) {
999 error_code = 0;
1000 goto do_fault;
1002 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1003 error_code = PG_ERROR_RSVD_MASK;
1004 goto do_fault;
1006 if (!(pml4e & PG_ACCESSED_MASK)) {
1007 pml4e |= PG_ACCESSED_MASK;
1008 stl_phys_notdirty(pml4e_addr, pml4e);
1010 ptep = pml4e ^ PG_NX_MASK;
1011 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1012 env->a20_mask;
1013 pdpe = ldq_phys(pdpe_addr);
1014 if (!(pdpe & PG_PRESENT_MASK)) {
1015 error_code = 0;
1016 goto do_fault;
1018 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1019 error_code = PG_ERROR_RSVD_MASK;
1020 goto do_fault;
1022 ptep &= pdpe ^ PG_NX_MASK;
1023 if (!(pdpe & PG_ACCESSED_MASK)) {
1024 pdpe |= PG_ACCESSED_MASK;
1025 stl_phys_notdirty(pdpe_addr, pdpe);
1027 } else
1028 #endif
1030 /* XXX: load them when cr3 is loaded ? */
1031 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1032 env->a20_mask;
1033 pdpe = ldq_phys(pdpe_addr);
1034 if (!(pdpe & PG_PRESENT_MASK)) {
1035 error_code = 0;
1036 goto do_fault;
1038 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1041 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1042 env->a20_mask;
1043 pde = ldq_phys(pde_addr);
1044 if (!(pde & PG_PRESENT_MASK)) {
1045 error_code = 0;
1046 goto do_fault;
1048 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1049 error_code = PG_ERROR_RSVD_MASK;
1050 goto do_fault;
1052 ptep &= pde ^ PG_NX_MASK;
1053 if (pde & PG_PSE_MASK) {
1054 /* 2 MB page */
1055 page_size = 2048 * 1024;
1056 ptep ^= PG_NX_MASK;
1057 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1058 goto do_fault_protect;
1059 if (is_user) {
1060 if (!(ptep & PG_USER_MASK))
1061 goto do_fault_protect;
1062 if (is_write && !(ptep & PG_RW_MASK))
1063 goto do_fault_protect;
1064 } else {
1065 if ((env->cr[0] & CR0_WP_MASK) &&
1066 is_write && !(ptep & PG_RW_MASK))
1067 goto do_fault_protect;
1069 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1070 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1071 pde |= PG_ACCESSED_MASK;
1072 if (is_dirty)
1073 pde |= PG_DIRTY_MASK;
1074 stl_phys_notdirty(pde_addr, pde);
1076 /* align to page_size */
1077 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1078 virt_addr = addr & ~(page_size - 1);
1079 } else {
1080 /* 4 KB page */
1081 if (!(pde & PG_ACCESSED_MASK)) {
1082 pde |= PG_ACCESSED_MASK;
1083 stl_phys_notdirty(pde_addr, pde);
1085 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1086 env->a20_mask;
1087 pte = ldq_phys(pte_addr);
1088 if (!(pte & PG_PRESENT_MASK)) {
1089 error_code = 0;
1090 goto do_fault;
1092 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1093 error_code = PG_ERROR_RSVD_MASK;
1094 goto do_fault;
1096 /* combine pde and pte nx, user and rw protections */
1097 ptep &= pte ^ PG_NX_MASK;
1098 ptep ^= PG_NX_MASK;
1099 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1100 goto do_fault_protect;
1101 if (is_user) {
1102 if (!(ptep & PG_USER_MASK))
1103 goto do_fault_protect;
1104 if (is_write && !(ptep & PG_RW_MASK))
1105 goto do_fault_protect;
1106 } else {
1107 if ((env->cr[0] & CR0_WP_MASK) &&
1108 is_write && !(ptep & PG_RW_MASK))
1109 goto do_fault_protect;
1111 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1112 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1113 pte |= PG_ACCESSED_MASK;
1114 if (is_dirty)
1115 pte |= PG_DIRTY_MASK;
1116 stl_phys_notdirty(pte_addr, pte);
1118 page_size = 4096;
1119 virt_addr = addr & ~0xfff;
1120 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1122 } else {
1123 uint32_t pde;
1125 /* page directory entry */
1126 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1127 env->a20_mask;
1128 pde = ldl_phys(pde_addr);
1129 if (!(pde & PG_PRESENT_MASK)) {
1130 error_code = 0;
1131 goto do_fault;
1133 /* if PSE bit is set, then we use a 4MB page */
1134 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1135 page_size = 4096 * 1024;
1136 if (is_user) {
1137 if (!(pde & PG_USER_MASK))
1138 goto do_fault_protect;
1139 if (is_write && !(pde & PG_RW_MASK))
1140 goto do_fault_protect;
1141 } else {
1142 if ((env->cr[0] & CR0_WP_MASK) &&
1143 is_write && !(pde & PG_RW_MASK))
1144 goto do_fault_protect;
1146 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1147 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1148 pde |= PG_ACCESSED_MASK;
1149 if (is_dirty)
1150 pde |= PG_DIRTY_MASK;
1151 stl_phys_notdirty(pde_addr, pde);
1154 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1155 ptep = pte;
1156 virt_addr = addr & ~(page_size - 1);
1157 } else {
1158 if (!(pde & PG_ACCESSED_MASK)) {
1159 pde |= PG_ACCESSED_MASK;
1160 stl_phys_notdirty(pde_addr, pde);
1163 /* page directory entry */
1164 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1165 env->a20_mask;
1166 pte = ldl_phys(pte_addr);
1167 if (!(pte & PG_PRESENT_MASK)) {
1168 error_code = 0;
1169 goto do_fault;
1171 /* combine pde and pte user and rw protections */
1172 ptep = pte & pde;
1173 if (is_user) {
1174 if (!(ptep & PG_USER_MASK))
1175 goto do_fault_protect;
1176 if (is_write && !(ptep & PG_RW_MASK))
1177 goto do_fault_protect;
1178 } else {
1179 if ((env->cr[0] & CR0_WP_MASK) &&
1180 is_write && !(ptep & PG_RW_MASK))
1181 goto do_fault_protect;
1183 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1184 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1185 pte |= PG_ACCESSED_MASK;
1186 if (is_dirty)
1187 pte |= PG_DIRTY_MASK;
1188 stl_phys_notdirty(pte_addr, pte);
1190 page_size = 4096;
1191 virt_addr = addr & ~0xfff;
1194 /* the page can be put in the TLB */
1195 prot = PAGE_READ;
1196 if (!(ptep & PG_NX_MASK))
1197 prot |= PAGE_EXEC;
1198 if (pte & PG_DIRTY_MASK) {
1199 /* only set write access if already dirty... otherwise wait
1200 for dirty access */
1201 if (is_user) {
1202 if (ptep & PG_RW_MASK)
1203 prot |= PAGE_WRITE;
1204 } else {
1205 if (!(env->cr[0] & CR0_WP_MASK) ||
1206 (ptep & PG_RW_MASK))
1207 prot |= PAGE_WRITE;
1210 do_mapping:
1211 pte = pte & env->a20_mask;
1213 /* Even if 4MB pages, we map only one 4KB page in the cache to
1214 avoid filling it too fast */
1215 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1216 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1217 vaddr = virt_addr + page_offset;
1219 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1220 return ret;
1221 do_fault_protect:
1222 error_code = PG_ERROR_P_MASK;
1223 do_fault:
1224 error_code |= (is_write << PG_ERROR_W_BIT);
1225 if (is_user)
1226 error_code |= PG_ERROR_U_MASK;
1227 if (is_write1 == 2 &&
1228 (env->efer & MSR_EFER_NXE) &&
1229 (env->cr[4] & CR4_PAE_MASK))
1230 error_code |= PG_ERROR_I_D_MASK;
1231 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1232 /* cr2 is not modified in case of exceptions */
1233 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1234 addr);
1235 } else {
1236 env->cr[2] = addr;
1238 env->error_code = error_code;
1239 env->exception_index = EXCP0E_PAGE;
1240 return 1;
1243 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1245 target_ulong pde_addr, pte_addr;
1246 uint64_t pte;
1247 target_phys_addr_t paddr;
1248 uint32_t page_offset;
1249 int page_size;
1251 if (env->cr[4] & CR4_PAE_MASK) {
1252 target_ulong pdpe_addr;
1253 uint64_t pde, pdpe;
1255 #ifdef TARGET_X86_64
1256 if (env->hflags & HF_LMA_MASK) {
1257 uint64_t pml4e_addr, pml4e;
1258 int32_t sext;
1260 /* test virtual address sign extension */
1261 sext = (int64_t)addr >> 47;
1262 if (sext != 0 && sext != -1)
1263 return -1;
1265 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1266 env->a20_mask;
1267 pml4e = ldq_phys(pml4e_addr);
1268 if (!(pml4e & PG_PRESENT_MASK))
1269 return -1;
1271 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1272 env->a20_mask;
1273 pdpe = ldq_phys(pdpe_addr);
1274 if (!(pdpe & PG_PRESENT_MASK))
1275 return -1;
1276 } else
1277 #endif
1279 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1280 env->a20_mask;
1281 pdpe = ldq_phys(pdpe_addr);
1282 if (!(pdpe & PG_PRESENT_MASK))
1283 return -1;
1286 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1287 env->a20_mask;
1288 pde = ldq_phys(pde_addr);
1289 if (!(pde & PG_PRESENT_MASK)) {
1290 return -1;
1292 if (pde & PG_PSE_MASK) {
1293 /* 2 MB page */
1294 page_size = 2048 * 1024;
1295 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1296 } else {
1297 /* 4 KB page */
1298 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1299 env->a20_mask;
1300 page_size = 4096;
1301 pte = ldq_phys(pte_addr);
1303 if (!(pte & PG_PRESENT_MASK))
1304 return -1;
1305 } else {
1306 uint32_t pde;
1308 if (!(env->cr[0] & CR0_PG_MASK)) {
1309 pte = addr;
1310 page_size = 4096;
1311 } else {
1312 /* page directory entry */
1313 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1314 pde = ldl_phys(pde_addr);
1315 if (!(pde & PG_PRESENT_MASK))
1316 return -1;
1317 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1318 pte = pde & ~0x003ff000; /* align to 4MB */
1319 page_size = 4096 * 1024;
1320 } else {
1321 /* page directory entry */
1322 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1323 pte = ldl_phys(pte_addr);
1324 if (!(pte & PG_PRESENT_MASK))
1325 return -1;
1326 page_size = 4096;
1329 pte = pte & env->a20_mask;
1332 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1333 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1334 return paddr;
1337 void hw_breakpoint_insert(CPUState *env, int index)
1339 int type, err = 0;
1341 switch (hw_breakpoint_type(env->dr[7], index)) {
1342 case 0:
1343 if (hw_breakpoint_enabled(env->dr[7], index))
1344 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1345 &env->cpu_breakpoint[index]);
1346 break;
1347 case 1:
1348 type = BP_CPU | BP_MEM_WRITE;
1349 goto insert_wp;
1350 case 2:
1351 /* No support for I/O watchpoints yet */
1352 break;
1353 case 3:
1354 type = BP_CPU | BP_MEM_ACCESS;
1355 insert_wp:
1356 err = cpu_watchpoint_insert(env, env->dr[index],
1357 hw_breakpoint_len(env->dr[7], index),
1358 type, &env->cpu_watchpoint[index]);
1359 break;
1361 if (err)
1362 env->cpu_breakpoint[index] = NULL;
1365 void hw_breakpoint_remove(CPUState *env, int index)
1367 if (!env->cpu_breakpoint[index])
1368 return;
1369 switch (hw_breakpoint_type(env->dr[7], index)) {
1370 case 0:
1371 if (hw_breakpoint_enabled(env->dr[7], index))
1372 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1373 break;
1374 case 1:
1375 case 3:
1376 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1377 break;
1378 case 2:
1379 /* No support for I/O watchpoints yet */
1380 break;
1384 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1386 target_ulong dr6;
1387 int reg, type;
1388 int hit_enabled = 0;
1390 dr6 = env->dr[6] & ~0xf;
1391 for (reg = 0; reg < 4; reg++) {
1392 type = hw_breakpoint_type(env->dr[7], reg);
1393 if ((type == 0 && env->dr[reg] == env->eip) ||
1394 ((type & 1) && env->cpu_watchpoint[reg] &&
1395 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1396 dr6 |= 1 << reg;
1397 if (hw_breakpoint_enabled(env->dr[7], reg))
1398 hit_enabled = 1;
1401 if (hit_enabled || force_dr6_update)
1402 env->dr[6] = dr6;
1403 return hit_enabled;
1406 static CPUDebugExcpHandler *prev_debug_excp_handler;
1408 void raise_exception(int exception_index);
/* Debug exception hook installed by cpu_x86_init() via
 * cpu_set_debug_excp_handler().  Turns qemu-level breakpoint/watchpoint
 * hits that correspond to guest DR7 breakpoints (flag BP_CPU) into a
 * guest #DB exception, then chains to the previously installed handler. */
static void breakpoint_handler(CPUState *env)
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A data watchpoint fired; only guest (BP_CPU) watchpoints may
           raise the guest exception. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                /* An enabled DR7 breakpoint matched: deliver #DB.
                   NOTE(review): presumably raise_exception() does not
                   return -- confirm against its definition. */
                raise_exception(EXCP01_DB);
            else
                /* Watchpoint not enabled in DR7: resume silently. */
                cpu_resume_from_signal(env, NULL);
    } else {
        /* Instruction breakpoint: scan for a BP_CPU breakpoint at eip
           and force a DR6 update before raising #DB. */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                break;
    /* Chain to whatever debug handler was installed before ours. */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
1435 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the *host* CPU for the given leaf
 * (function) and sub-leaf (count), storing the results through the four
 * output pointers.  A NULL pointer means the caller does not care about
 * that register.  Only does anything when built with CONFIG_KVM.
 * NOTE(review): without CONFIG_KVM the outputs are left unmodified --
 * callers must not rely on them in that configuration. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                 "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* i386: save/restore all registers with pusha/popa (avoids clobber
       constraints on ebx, which may be the PIC base register) and store
       the results through %esi (operand %2 = vec). */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* Copy out only the registers the caller asked for. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
/* CPUID emulation: fill eax/ebx/ecx/edx for the requested leaf (index)
 * and sub-leaf (count).  Most values come from the env->cpuid_* fields
 * set up when the CPU model was registered; several cache/monitor
 * leaves are hardcoded for guest-OS compatibility.  Under KVM a few
 * bits are copied from, or masked against, the host's CPUID. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* Extended range (0x8000xxxx): out-of-range requests fall back
           to the highest *basic* leaf, mirroring real-CPU behaviour. */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    switch(index) {
    case 0:
        /* Vendor string leaf; note the EBX/EDX/ECX register order. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual host vendor -- and say goodbye to
         * migration between different vendors if you use compatibility
         * mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* Version, APIC id, and the two main feature-flag registers. */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility; one cache level
           is described per sub-leaf (count). */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf: nothing advertised. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf: not supported. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Highest extended leaf + vendor string (EBX/EDX/ECX order). */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended feature flags. */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;
        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;
            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
            /* disable CPU features that the host does not support */
            /* long mode (EDX bit 29) */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall (EDX bit 11) */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx (EDX bit 20) */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;
            /* disable CPU features that KVM cannot support */
            /* svm (ECX bit 2) */
            *ecx &= ~4UL;
            /* 3dnow + 3dnowext (EDX bits 30-31) */
            *edx &= ~0xc0000000;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string: 16 bytes per leaf across three leaves. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM (nested virtualisation) capability leaf. */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
1673 CPUX86State *cpu_x86_init(const char *cpu_model)
1675 CPUX86State *env;
1676 static int inited;
1678 env = qemu_mallocz(sizeof(CPUX86State));
1679 cpu_exec_init(env);
1680 env->cpu_model_str = cpu_model;
1682 /* init various static tables */
1683 if (!inited) {
1684 inited = 1;
1685 optimize_flags_init();
1686 #ifndef CONFIG_USER_ONLY
1687 prev_debug_excp_handler =
1688 cpu_set_debug_excp_handler(breakpoint_handler);
1689 #endif
1691 if (cpu_x86_register(env, cpu_model) < 0) {
1692 cpu_x86_close(env);
1693 return NULL;
1695 cpu_reset(env);
1696 #ifdef CONFIG_KQEMU
1697 kqemu_init(env);
1698 #endif
1700 qemu_init_vcpu(env);
1702 return env;