allow overriding of CPUID level on command line
[qemu.git] / target-i386 / helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}

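/* A flag name selects the bit at its index in the corresponding table
 * above.  For example (illustrative): "sse2" is entry 26 of feature_name,
 * so "+sse2" sets bit 26 (CPUID_SSE2) in *features, and "syscall" is
 * entry 11 of ext2_feature_name, so it sets bit 11 in *ext2_features.
 * A name present in several tables ("fpu", "mmx", ...) sets a bit in
 * each of them, which is harmless since the loops do not stop early. */
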
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs have no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};

static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t numvalue;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

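/* Illustrative feature strings this parser accepts (assuming the usual
 * "-cpu model[,option,...]" plumbing hands the string through unchanged):
 *
 *   -cpu qemu64                            pick a built-in model
 *   -cpu qemu64,+ssse3,-nx                 toggle individual feature bits
 *   -cpu qemu32,level=2                    override the maximum CPUID level
 *   -cpu qemu64,family=15,model=6,stepping=1
 *   -cpu qemu64,vendor=AuthenticAMD        must be exactly 12 characters
 *
 * Note that an "xlevel" below 0x80000000 is rebased into the extended
 * range, so "xlevel=8" means 0x80000008. */
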
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

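/* Worked example of the cpuid_version packing above (illustrative):
 * "phenom" has family=16, model=2, stepping=3.  Since family > 0x0f, the
 * base family field saturates at 0xf and the extended family carries the
 * rest: 0xf00 | ((16 - 0x0f) << 20) = 0x100f00.  OR-ing in the model and
 * stepping gives 0x100f00 | (2 << 4) | 3 = 0x100f23, which is what CPUID
 * leaf 1 then returns in EAX. */
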
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

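/* The resulting mask (illustrative): with the A20 gate enabled,
 * a20_mask = ~0x100000 | (1 << 20) = ~0, so addresses pass through
 * unchanged; with it disabled, a20_mask = ~0x100000 clears bit 20 of
 * every address it is AND'ed with, reproducing the 8086-style 1 MB
 * wrap-around that real-mode software may rely on. */
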
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

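/* Illustrative: 0xfffffff000LL keeps physical-address bits 12..39,
 * matching the 40-bit physical space advertised by CPUID leaf 0x80000008
 * below; the 32-bit value keeps bits 12..35 for the 36-bit PSE36/PAE
 * space. */
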
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

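/* Worked example of the classic two-level walk above (illustrative
 * numbers): for addr = 0x00401234 with paging enabled, no PAE, no PSE:
 *   pde_addr = (cr3 & ~0xfff) + ((0x00401234 >> 20) & 0xffc)
 *            = page directory base + 4      (PDE index 1)
 *   pte_addr = (pde & ~0xfff) + ((0x00401234 >> 10) & 0xffc)
 *            = page table base + 4          (PTE index 1)
 *   paddr    = (pte & ~0xfff) + 0x234
 */
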
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}

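/* Per-bank MSR layout assumed by the "banks += 4 * bank" indexing above:
 *   banks[0] = MCi_CTL,  banks[1] = MCi_STATUS,
 *   banks[2] = MCi_ADDR, banks[3] = MCi_MISC
 * so, for example, an error injected into bank 2 updates
 * mce_banks[9..11]. */
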
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = cenv->mcg_cap & 0xff;
        cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

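/* A note on the caller contract (not enforced here): when QEMU is built
 * without CONFIG_KVM this function leaves its outputs untouched, which is
 * why callers such as cpu_x86_fill_host() initialize eax/ebx/ecx/edx to 0
 * before calling it. */
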
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (env->nr_cores * env->nr_threads > 1 &&
            env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
            env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
            env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
            *ecx |= 1 << 1;    /* CmpLegacy bit */
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

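/* Illustrative leaf 1 EBX encoding from the code above: for a guest with
 * apic_id 0, 2 cores and 2 threads per core, EBX = (0 << 24) |
 * (4 << 16) | (8 << 8) = 0x00040800, i.e. 4 logical processors and a
 * CLFLUSH line size of 8 * 8 = 64 bytes, with EDX bit 28 (HTT) also set. */
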
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

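/* Worked example of the descriptor unpacking above (illustrative): a flat
 * 4 GB code segment has e1 = 0x0000ffff, e2 = 0x00cf9a00, giving base = 0
 * and limit = 0xffff | 0xf0000 = 0xfffff; DESC_G_MASK is set, so the limit
 * becomes (0xfffff << 12) | 0xfff = 0xffffffff. */
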
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
    cpu_reset(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif