Update SeaBIOS to include PCI based option rom loading
[qemu/aliguori-queue.git] / target-i386 / helper.c
blob730e396a696fd371cd0f611c90ca82d4b0fe81cd
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "exec-all.h"
28 #include "qemu-common.h"
29 #include "kvm.h"
31 //#define DEBUG_MMU
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used.  Each table maps bit N of
 * the corresponding CPUID register to its flag name (NULL = unnamed bit). */
static const char *feature_name[] = {
    /* CPUID 0x00000001 EDX */
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    /* CPUID 0x00000001 ECX */
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    /* CPUID 0x80000001 EDX */
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    /* CPUID 0x80000001 ECX */
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* Scan one 32-entry name table and OR in the bit for every entry that
 * matches flagname.  Returns non-zero if at least one match was found.
 * (The full table is always scanned: a name may appear more than once.) */
static int lookup_feature_bit(const char *flagname,
                              const char *const *table, uint32_t *bits)
{
    int bit;
    int hit = 0;

    for (bit = 0; bit < 32; bit++) {
        if (table[bit] && !strcmp(flagname, table[bit])) {
            *bits |= 1u << bit;
            hit = 1;
        }
    }
    return hit;
}

/* Set the CPUID feature bit(s) named by flagname in the four feature
 * words.  A name present in several tables (e.g. "cx8") sets a bit in
 * each of them.  Unknown names are reported on stderr and ignored. */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int found = 0;

    found |= lookup_feature_bit(flagname, feature_name, features);
    found |= lookup_feature_bit(flagname, ext_feature_name, ext_features);
    found |= lookup_feature_bit(flagname, ext2_feature_name, ext2_features);
    found |= lookup_feature_bit(flagname, ext3_feature_name, ext3_features);
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
/* Static description of one named CPU model (the "-cpu NAME" table entry). */
typedef struct x86_def_t {
    const char *name;                    /* model name used on the command line */
    uint32_t level;                      /* highest basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* CPUID 0 vendor id: EBX, EDX, ECX */
    int family;
    int model;
    int stepping;
    /* CPUID feature words: leaf 1 EDX/ECX and leaf 0x80000001 EDX/ECX */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* highest extended CPUID leaf */
    char model_id[48];                   /* brand string (CPUID 0x80000002..4) */
    int vendor_override;                 /* non-zero: user forced the vendor id */
} x86_def_t;
/* Cumulative baseline feature sets for the classic CPU models below.
 * Each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* PPro-style base set used by the qemu32/qemu64 family of models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
118 static x86_def_t x86_defs[] = {
119 #ifdef TARGET_X86_64
121 .name = "qemu64",
122 .level = 4,
123 .vendor1 = CPUID_VENDOR_AMD_1,
124 .vendor2 = CPUID_VENDOR_AMD_2,
125 .vendor3 = CPUID_VENDOR_AMD_3,
126 .family = 6,
127 .model = 2,
128 .stepping = 3,
129 .features = PPRO_FEATURES |
130 /* these features are needed for Win64 and aren't fully implemented */
131 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
132 /* this feature is needed for Solaris and isn't fully implemented */
133 CPUID_PSE36,
134 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
135 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
136 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
137 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
138 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
139 .xlevel = 0x8000000A,
140 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
143 .name = "phenom",
144 .level = 5,
145 .vendor1 = CPUID_VENDOR_AMD_1,
146 .vendor2 = CPUID_VENDOR_AMD_2,
147 .vendor3 = CPUID_VENDOR_AMD_3,
148 .family = 16,
149 .model = 2,
150 .stepping = 3,
151 /* Missing: CPUID_VME, CPUID_HT */
152 .features = PPRO_FEATURES |
153 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
154 CPUID_PSE36,
155 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
156 CPUID_EXT_POPCNT,
157 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
158 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
159 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
160 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
161 CPUID_EXT2_FFXSR,
162 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
163 CPUID_EXT3_CR8LEG,
164 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
165 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
166 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
167 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
168 .xlevel = 0x8000001A,
169 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
172 .name = "core2duo",
173 .level = 10,
174 .family = 6,
175 .model = 15,
176 .stepping = 11,
177 /* The original CPU also implements these features:
178 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
179 CPUID_TM, CPUID_PBE */
180 .features = PPRO_FEATURES |
181 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
182 CPUID_PSE36,
183 /* The original CPU also implements these ext features:
184 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
185 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
186 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
187 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
188 .ext3_features = CPUID_EXT3_LAHF_LM,
189 .xlevel = 0x80000008,
190 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
193 .name = "kvm64",
194 .level = 5,
195 .vendor1 = CPUID_VENDOR_INTEL_1,
196 .vendor2 = CPUID_VENDOR_INTEL_2,
197 .vendor3 = CPUID_VENDOR_INTEL_3,
198 .family = 15,
199 .model = 6,
200 .stepping = 1,
201 /* Missing: CPUID_VME, CPUID_HT */
202 .features = PPRO_FEATURES |
203 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
204 CPUID_PSE36,
205 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
206 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
207 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
208 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
209 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
210 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
211 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
212 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
213 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
214 .ext3_features = 0,
215 .xlevel = 0x80000008,
216 .model_id = "Common KVM processor"
218 #endif
220 .name = "qemu32",
221 .level = 4,
222 .family = 6,
223 .model = 3,
224 .stepping = 3,
225 .features = PPRO_FEATURES,
226 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
227 .xlevel = 0,
228 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
231 .name = "coreduo",
232 .level = 10,
233 .family = 6,
234 .model = 14,
235 .stepping = 8,
236 /* The original CPU also implements these features:
237 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
238 CPUID_TM, CPUID_PBE */
239 .features = PPRO_FEATURES | CPUID_VME |
240 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
241 /* The original CPU also implements these ext features:
242 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
243 CPUID_EXT_PDCM */
244 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
245 .ext2_features = CPUID_EXT2_NX,
246 .xlevel = 0x80000008,
247 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
250 .name = "486",
251 .level = 0,
252 .family = 4,
253 .model = 0,
254 .stepping = 0,
255 .features = I486_FEATURES,
256 .xlevel = 0,
259 .name = "pentium",
260 .level = 1,
261 .family = 5,
262 .model = 4,
263 .stepping = 3,
264 .features = PENTIUM_FEATURES,
265 .xlevel = 0,
268 .name = "pentium2",
269 .level = 2,
270 .family = 6,
271 .model = 5,
272 .stepping = 2,
273 .features = PENTIUM2_FEATURES,
274 .xlevel = 0,
277 .name = "pentium3",
278 .level = 2,
279 .family = 6,
280 .model = 7,
281 .stepping = 3,
282 .features = PENTIUM3_FEATURES,
283 .xlevel = 0,
286 .name = "athlon",
287 .level = 2,
288 .vendor1 = CPUID_VENDOR_AMD_1,
289 .vendor2 = CPUID_VENDOR_AMD_2,
290 .vendor3 = CPUID_VENDOR_AMD_3,
291 .family = 6,
292 .model = 2,
293 .stepping = 3,
294 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
295 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
296 .xlevel = 0x80000008,
297 /* XXX: put another string ? */
298 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
301 .name = "n270",
302 /* original is on level 10 */
303 .level = 5,
304 .family = 6,
305 .model = 28,
306 .stepping = 2,
307 .features = PPRO_FEATURES |
308 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
309 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
310 * CPUID_HT | CPUID_TM | CPUID_PBE */
311 /* Some CPUs got no CPUID_SEP */
312 .ext_features = CPUID_EXT_MONITOR |
313 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
314 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
315 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
316 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
317 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
318 .xlevel = 0x8000000A,
319 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
323 static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
324 uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
/* Read the host's 48-byte CPUID brand string (leaves 0x80000002..4)
 * into str.  str must have room for 48 bytes; the brand string's NUL
 * terminator is part of those 48 bytes.  Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        /* each leaf yields 16 bytes of the brand string, EAX..EDX in order */
        host_cpuid(0x80000002 + leaf, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, 16);
    }
    return 0;
}
341 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
343 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
345 x86_cpu_def->name = "host";
346 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
347 x86_cpu_def->level = eax;
348 x86_cpu_def->vendor1 = ebx;
349 x86_cpu_def->vendor2 = edx;
350 x86_cpu_def->vendor3 = ecx;
352 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
353 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
354 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
355 x86_cpu_def->stepping = eax & 0x0F;
356 x86_cpu_def->ext_features = ecx;
357 x86_cpu_def->features = edx;
359 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
360 x86_cpu_def->xlevel = eax;
362 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
363 x86_cpu_def->ext2_features = edx;
364 x86_cpu_def->ext3_features = ecx;
365 cpu_x86_fill_model_id(x86_cpu_def->model_id);
366 x86_cpu_def->vendor_override = 0;
368 return 0;
371 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
373 unsigned int i;
374 x86_def_t *def;
376 char *s = strdup(cpu_model);
377 char *featurestr, *name = strtok(s, ",");
378 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
379 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
380 uint32_t numvalue;
382 def = NULL;
383 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
384 if (strcmp(name, x86_defs[i].name) == 0) {
385 def = &x86_defs[i];
386 break;
389 if (kvm_enabled() && strcmp(name, "host") == 0) {
390 cpu_x86_fill_host(x86_cpu_def);
391 } else if (!def) {
392 goto error;
393 } else {
394 memcpy(x86_cpu_def, def, sizeof(*def));
397 add_flagname_to_bitmaps("hypervisor", &plus_features,
398 &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
400 featurestr = strtok(NULL, ",");
402 while (featurestr) {
403 char *val;
404 if (featurestr[0] == '+') {
405 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
406 } else if (featurestr[0] == '-') {
407 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
408 } else if ((val = strchr(featurestr, '='))) {
409 *val = 0; val++;
410 if (!strcmp(featurestr, "family")) {
411 char *err;
412 numvalue = strtoul(val, &err, 0);
413 if (!*val || *err) {
414 fprintf(stderr, "bad numerical value %s\n", val);
415 goto error;
417 x86_cpu_def->family = numvalue;
418 } else if (!strcmp(featurestr, "model")) {
419 char *err;
420 numvalue = strtoul(val, &err, 0);
421 if (!*val || *err || numvalue > 0xff) {
422 fprintf(stderr, "bad numerical value %s\n", val);
423 goto error;
425 x86_cpu_def->model = numvalue;
426 } else if (!strcmp(featurestr, "stepping")) {
427 char *err;
428 numvalue = strtoul(val, &err, 0);
429 if (!*val || *err || numvalue > 0xf) {
430 fprintf(stderr, "bad numerical value %s\n", val);
431 goto error;
433 x86_cpu_def->stepping = numvalue ;
434 } else if (!strcmp(featurestr, "level")) {
435 char *err;
436 numvalue = strtoul(val, &err, 0);
437 if (!*val || *err) {
438 fprintf(stderr, "bad numerical value %s\n", val);
439 goto error;
441 x86_cpu_def->level = numvalue;
442 } else if (!strcmp(featurestr, "xlevel")) {
443 char *err;
444 numvalue = strtoul(val, &err, 0);
445 if (!*val || *err) {
446 fprintf(stderr, "bad numerical value %s\n", val);
447 goto error;
449 if (numvalue < 0x80000000) {
450 numvalue += 0x80000000;
452 x86_cpu_def->xlevel = numvalue;
453 } else if (!strcmp(featurestr, "vendor")) {
454 if (strlen(val) != 12) {
455 fprintf(stderr, "vendor string must be 12 chars long\n");
456 goto error;
458 x86_cpu_def->vendor1 = 0;
459 x86_cpu_def->vendor2 = 0;
460 x86_cpu_def->vendor3 = 0;
461 for(i = 0; i < 4; i++) {
462 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
463 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
464 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
466 x86_cpu_def->vendor_override = 1;
467 } else if (!strcmp(featurestr, "model_id")) {
468 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
469 val);
470 } else {
471 fprintf(stderr, "unrecognized feature %s\n", featurestr);
472 goto error;
474 } else {
475 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
476 goto error;
478 featurestr = strtok(NULL, ",");
480 x86_cpu_def->features |= plus_features;
481 x86_cpu_def->ext_features |= plus_ext_features;
482 x86_cpu_def->ext2_features |= plus_ext2_features;
483 x86_cpu_def->ext3_features |= plus_ext3_features;
484 x86_cpu_def->features &= ~minus_features;
485 x86_cpu_def->ext_features &= ~minus_ext_features;
486 x86_cpu_def->ext2_features &= ~minus_ext2_features;
487 x86_cpu_def->ext3_features &= ~minus_ext3_features;
488 free(s);
489 return 0;
491 error:
492 free(s);
493 return -1;
496 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
498 unsigned int i;
500 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
501 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
504 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
506 x86_def_t def1, *def = &def1;
508 if (cpu_x86_find_by_name(def, cpu_model) < 0)
509 return -1;
510 if (def->vendor1) {
511 env->cpuid_vendor1 = def->vendor1;
512 env->cpuid_vendor2 = def->vendor2;
513 env->cpuid_vendor3 = def->vendor3;
514 } else {
515 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
516 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
517 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
519 env->cpuid_vendor_override = def->vendor_override;
520 env->cpuid_level = def->level;
521 if (def->family > 0x0f)
522 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
523 else
524 env->cpuid_version = def->family << 8;
525 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
526 env->cpuid_version |= def->stepping;
527 env->cpuid_features = def->features;
528 env->pat = 0x0007040600070406ULL;
529 env->cpuid_ext_features = def->ext_features;
530 env->cpuid_ext2_features = def->ext2_features;
531 env->cpuid_xlevel = def->xlevel;
532 env->cpuid_ext3_features = def->ext3_features;
534 const char *model_id = def->model_id;
535 int c, len, i;
536 if (!model_id)
537 model_id = "";
538 len = strlen(model_id);
539 for(i = 0; i < 48; i++) {
540 if (i >= len)
541 c = '\0';
542 else
543 c = (uint8_t)model_id[i];
544 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
547 return 0;
550 /* NOTE: must be called outside the CPU execute loop */
551 void cpu_reset(CPUX86State *env)
553 int i;
555 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
556 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
557 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
560 memset(env, 0, offsetof(CPUX86State, breakpoints));
562 tlb_flush(env, 1);
564 env->old_exception = -1;
566 /* init to reset state */
568 #ifdef CONFIG_SOFTMMU
569 env->hflags |= HF_SOFTMMU_MASK;
570 #endif
571 env->hflags2 |= HF2_GIF_MASK;
573 cpu_x86_update_cr0(env, 0x60000010);
574 env->a20_mask = ~0x0;
575 env->smbase = 0x30000;
577 env->idt.limit = 0xffff;
578 env->gdt.limit = 0xffff;
579 env->ldt.limit = 0xffff;
580 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
581 env->tr.limit = 0xffff;
582 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
584 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
585 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
586 DESC_R_MASK | DESC_A_MASK);
587 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
588 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
589 DESC_A_MASK);
590 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
591 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
592 DESC_A_MASK);
593 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
594 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
595 DESC_A_MASK);
596 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
597 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
598 DESC_A_MASK);
599 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
600 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
601 DESC_A_MASK);
603 env->eip = 0xfff0;
604 env->regs[R_EDX] = env->cpuid_version;
606 env->eflags = 0x2;
608 /* FPU init */
609 for(i = 0;i < 8; i++)
610 env->fptags[i] = 1;
611 env->fpuc = 0x37f;
613 env->mxcsr = 0x1f80;
615 memset(env->dr, 0, sizeof(env->dr));
616 env->dr[6] = DR6_FIXED_1;
617 env->dr[7] = DR7_FIXED_1;
618 cpu_breakpoint_remove_all(env, BP_CPU);
619 cpu_watchpoint_remove_all(env, BP_CPU);
622 void cpu_x86_close(CPUX86State *env)
624 qemu_free(env);
627 /***********************************************************/
628 /* x86 debug */
/* Printable names for the lazy condition-code operations, indexed by
 * CC_OP value; after DYNAMIC/EFLAGS each operation comes in the four
 * operand widths B/W/L/Q. */
static const char *cc_op_str[] = {
    "DYNAMIC", "EFLAGS",
    "MULB",   "MULW",   "MULL",   "MULQ",
    "ADDB",   "ADDW",   "ADDL",   "ADDQ",
    "ADCB",   "ADCW",   "ADCL",   "ADCQ",
    "SUBB",   "SUBW",   "SUBL",   "SUBQ",
    "SBBB",   "SBBW",   "SBBL",   "SBBQ",
    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",
    "INCB",   "INCW",   "INCL",   "INCQ",
    "DECB",   "DECW",   "DECL",   "DECQ",
    "SHLB",   "SHLW",   "SHLL",   "SHLQ",
    "SARB",   "SARW",   "SARL",   "SARQ",
};
685 static void
686 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
687 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
688 const char *name, struct SegmentCache *sc)
690 #ifdef TARGET_X86_64
691 if (env->hflags & HF_CS64_MASK) {
692 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
693 sc->selector, sc->base, sc->limit, sc->flags);
694 } else
695 #endif
697 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
698 (uint32_t)sc->base, sc->limit, sc->flags);
701 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
702 goto done;
704 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
705 if (sc->flags & DESC_S_MASK) {
706 if (sc->flags & DESC_CS_MASK) {
707 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
708 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
709 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
710 (sc->flags & DESC_R_MASK) ? 'R' : '-');
711 } else {
712 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
713 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
714 (sc->flags & DESC_W_MASK) ? 'W' : '-');
716 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
717 } else {
718 static const char *sys_type_name[2][16] = {
719 { /* 32 bit mode */
720 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
721 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
722 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
723 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
725 { /* 64 bit mode */
726 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
727 "Reserved", "Reserved", "Reserved", "Reserved",
728 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
729 "Reserved", "IntGate64", "TrapGate64"
732 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
733 [(sc->flags & DESC_TYPE_MASK)
734 >> DESC_TYPE_SHIFT]);
736 done:
737 cpu_fprintf(f, "\n");
740 void cpu_dump_state(CPUState *env, FILE *f,
741 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
742 int flags)
744 int eflags, i, nb;
745 char cc_op_name[32];
746 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
748 cpu_synchronize_state(env);
750 eflags = env->eflags;
751 #ifdef TARGET_X86_64
752 if (env->hflags & HF_CS64_MASK) {
753 cpu_fprintf(f,
754 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
755 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
756 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
757 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
758 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
759 env->regs[R_EAX],
760 env->regs[R_EBX],
761 env->regs[R_ECX],
762 env->regs[R_EDX],
763 env->regs[R_ESI],
764 env->regs[R_EDI],
765 env->regs[R_EBP],
766 env->regs[R_ESP],
767 env->regs[8],
768 env->regs[9],
769 env->regs[10],
770 env->regs[11],
771 env->regs[12],
772 env->regs[13],
773 env->regs[14],
774 env->regs[15],
775 env->eip, eflags,
776 eflags & DF_MASK ? 'D' : '-',
777 eflags & CC_O ? 'O' : '-',
778 eflags & CC_S ? 'S' : '-',
779 eflags & CC_Z ? 'Z' : '-',
780 eflags & CC_A ? 'A' : '-',
781 eflags & CC_P ? 'P' : '-',
782 eflags & CC_C ? 'C' : '-',
783 env->hflags & HF_CPL_MASK,
784 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
785 (env->a20_mask >> 20) & 1,
786 (env->hflags >> HF_SMM_SHIFT) & 1,
787 env->halted);
788 } else
789 #endif
791 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
792 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
793 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
794 (uint32_t)env->regs[R_EAX],
795 (uint32_t)env->regs[R_EBX],
796 (uint32_t)env->regs[R_ECX],
797 (uint32_t)env->regs[R_EDX],
798 (uint32_t)env->regs[R_ESI],
799 (uint32_t)env->regs[R_EDI],
800 (uint32_t)env->regs[R_EBP],
801 (uint32_t)env->regs[R_ESP],
802 (uint32_t)env->eip, eflags,
803 eflags & DF_MASK ? 'D' : '-',
804 eflags & CC_O ? 'O' : '-',
805 eflags & CC_S ? 'S' : '-',
806 eflags & CC_Z ? 'Z' : '-',
807 eflags & CC_A ? 'A' : '-',
808 eflags & CC_P ? 'P' : '-',
809 eflags & CC_C ? 'C' : '-',
810 env->hflags & HF_CPL_MASK,
811 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
812 (env->a20_mask >> 20) & 1,
813 (env->hflags >> HF_SMM_SHIFT) & 1,
814 env->halted);
817 for(i = 0; i < 6; i++) {
818 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
819 &env->segs[i]);
821 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
822 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
824 #ifdef TARGET_X86_64
825 if (env->hflags & HF_LMA_MASK) {
826 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
827 env->gdt.base, env->gdt.limit);
828 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
829 env->idt.base, env->idt.limit);
830 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
831 (uint32_t)env->cr[0],
832 env->cr[2],
833 env->cr[3],
834 (uint32_t)env->cr[4]);
835 for(i = 0; i < 4; i++)
836 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
837 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
838 env->dr[6], env->dr[7]);
839 } else
840 #endif
842 cpu_fprintf(f, "GDT= %08x %08x\n",
843 (uint32_t)env->gdt.base, env->gdt.limit);
844 cpu_fprintf(f, "IDT= %08x %08x\n",
845 (uint32_t)env->idt.base, env->idt.limit);
846 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
847 (uint32_t)env->cr[0],
848 (uint32_t)env->cr[2],
849 (uint32_t)env->cr[3],
850 (uint32_t)env->cr[4]);
851 for(i = 0; i < 4; i++)
852 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
853 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
855 if (flags & X86_DUMP_CCOP) {
856 if ((unsigned)env->cc_op < CC_OP_NB)
857 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
858 else
859 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
860 #ifdef TARGET_X86_64
861 if (env->hflags & HF_CS64_MASK) {
862 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
863 env->cc_src, env->cc_dst,
864 cc_op_name);
865 } else
866 #endif
868 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
869 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
870 cc_op_name);
873 if (flags & X86_DUMP_FPU) {
874 int fptag;
875 fptag = 0;
876 for(i = 0; i < 8; i++) {
877 fptag |= ((!env->fptags[i]) << i);
879 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
880 env->fpuc,
881 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
882 env->fpstt,
883 fptag,
884 env->mxcsr);
885 for(i=0;i<8;i++) {
886 #if defined(USE_X86LDOUBLE)
887 union {
888 long double d;
889 struct {
890 uint64_t lower;
891 uint16_t upper;
892 } l;
893 } tmp;
894 tmp.d = env->fpregs[i].d;
895 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
896 i, tmp.l.lower, tmp.l.upper);
897 #else
898 cpu_fprintf(f, "FPR%d=%016" PRIx64,
899 i, env->fpregs[i].mmx.q);
900 #endif
901 if ((i & 1) == 1)
902 cpu_fprintf(f, "\n");
903 else
904 cpu_fprintf(f, " ");
906 if (env->hflags & HF_CS64_MASK)
907 nb = 16;
908 else
909 nb = 8;
910 for(i=0;i<nb;i++) {
911 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
913 env->xmm_regs[i].XMM_L(3),
914 env->xmm_regs[i].XMM_L(2),
915 env->xmm_regs[i].XMM_L(1),
916 env->xmm_regs[i].XMM_L(0));
917 if ((i & 1) == 1)
918 cpu_fprintf(f, "\n");
919 else
920 cpu_fprintf(f, " ");
925 /***********************************************************/
926 /* x86 mmu */
927 /* XXX: add PGE support */
929 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
931 a20_state = (a20_state != 0);
932 if (a20_state != ((env->a20_mask >> 20) & 1)) {
933 #if defined(DEBUG_MMU)
934 printf("A20 update: a20=%d\n", a20_state);
935 #endif
936 /* if the cpu is currently executing code, we must unlink it and
937 all the potentially executing TB */
938 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
940 /* when a20 is changed, all the MMU mappings are invalid, so
941 we must flush everything */
942 tlb_flush(env, 1);
943 env->a20_mask = ~(1 << 20) | (a20_state << 20);
947 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
949 int pe_state;
951 #if defined(DEBUG_MMU)
952 printf("CR0 update: CR0=0x%08x\n", new_cr0);
953 #endif
954 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
955 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
956 tlb_flush(env, 1);
959 #ifdef TARGET_X86_64
960 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
961 (env->efer & MSR_EFER_LME)) {
962 /* enter in long mode */
963 /* XXX: generate an exception */
964 if (!(env->cr[4] & CR4_PAE_MASK))
965 return;
966 env->efer |= MSR_EFER_LMA;
967 env->hflags |= HF_LMA_MASK;
968 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
969 (env->efer & MSR_EFER_LMA)) {
970 /* exit long mode */
971 env->efer &= ~MSR_EFER_LMA;
972 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
973 env->eip &= 0xffffffff;
975 #endif
976 env->cr[0] = new_cr0 | CR0_ET_MASK;
978 /* update PE flag in hidden flags */
979 pe_state = (env->cr[0] & CR0_PE_MASK);
980 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
981 /* ensure that ADDSEG is always set in real mode */
982 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
983 /* update FPU flags */
984 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
985 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
988 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
989 the PDPT */
990 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
992 env->cr[3] = new_cr3;
993 if (env->cr[0] & CR0_PG_MASK) {
994 #if defined(DEBUG_MMU)
995 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
996 #endif
997 tlb_flush(env, 0);
1001 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
1003 #if defined(DEBUG_MMU)
1004 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
1005 #endif
1006 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
1007 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
1008 tlb_flush(env, 1);
1010 /* SSE handling */
1011 if (!(env->cpuid_features & CPUID_SSE))
1012 new_cr4 &= ~CR4_OSFXSR_MASK;
1013 if (new_cr4 & CR4_OSFXSR_MASK)
1014 env->hflags |= HF_OSFXSR_MASK;
1015 else
1016 env->hflags &= ~HF_OSFXSR_MASK;
1018 env->cr[4] = new_cr4;
1021 #if defined(CONFIG_USER_ONLY)
1023 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
1024 int is_write, int mmu_idx, int is_softmmu)
1026 /* user mode only emulation */
1027 is_write &= 1;
1028 env->cr[2] = addr;
1029 env->error_code = (is_write << PG_ERROR_W_BIT);
1030 env->error_code |= PG_ERROR_U_MASK;
1031 env->exception_index = EXCP0E_PAGE;
1032 return 1;
1035 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1037 return addr;
1040 #else
1042 /* XXX: This value should match the one returned by CPUID
1043 * and in exec.c */
1044 # if defined(TARGET_X86_64)
1045 # define PHYS_ADDR_MASK 0xfffffff000LL
1046 # else
1047 # define PHYS_ADDR_MASK 0xffffff000LL
1048 # endif
/* Walk the guest page tables for 'addr' and install the translation in
   the software TLB, updating accessed/dirty bits as a real MMU would.
   'is_write1' encodes the access type (0 = read, 1 = write, 2 = ifetch);
   only its low bit is the architectural W error-code bit.

   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, full permissions */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

        /* 'ptep' accumulates the combined NX/USER/RW protection of all
           levels; NX is tracked inverted (xor) so that '&' across levels
           yields "NX set if any level sets it". */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                /* non-canonical address: #GP, not #PF */
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                /* NX bit set while NXE disabled -> reserved bit fault */
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries carry no protection bits */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* start with fully-permissive accumulated protections */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the inversion: ptep now carries the real NX sense */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only honour RW when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit (non-PAE) two-level walk */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
/* Translate a guest virtual address to a physical address for debugger
   accesses.  Performs a read-only page-table walk: no A/D bit updates,
   no fault injection.  Returns -1 if the address is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        /* for the 2 MB case 'pte' is the PDE itself, whose present bit
           was already checked above */
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1440 void hw_breakpoint_insert(CPUState *env, int index)
1442 int type, err = 0;
1444 switch (hw_breakpoint_type(env->dr[7], index)) {
1445 case 0:
1446 if (hw_breakpoint_enabled(env->dr[7], index))
1447 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1448 &env->cpu_breakpoint[index]);
1449 break;
1450 case 1:
1451 type = BP_CPU | BP_MEM_WRITE;
1452 goto insert_wp;
1453 case 2:
1454 /* No support for I/O watchpoints yet */
1455 break;
1456 case 3:
1457 type = BP_CPU | BP_MEM_ACCESS;
1458 insert_wp:
1459 err = cpu_watchpoint_insert(env, env->dr[index],
1460 hw_breakpoint_len(env->dr[7], index),
1461 type, &env->cpu_watchpoint[index]);
1462 break;
1464 if (err)
1465 env->cpu_breakpoint[index] = NULL;
1468 void hw_breakpoint_remove(CPUState *env, int index)
1470 if (!env->cpu_breakpoint[index])
1471 return;
1472 switch (hw_breakpoint_type(env->dr[7], index)) {
1473 case 0:
1474 if (hw_breakpoint_enabled(env->dr[7], index))
1475 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1476 break;
1477 case 1:
1478 case 3:
1479 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1480 break;
1481 case 2:
1482 /* No support for I/O watchpoints yet */
1483 break;
1487 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1489 target_ulong dr6;
1490 int reg, type;
1491 int hit_enabled = 0;
1493 dr6 = env->dr[6] & ~0xf;
1494 for (reg = 0; reg < 4; reg++) {
1495 type = hw_breakpoint_type(env->dr[7], reg);
1496 if ((type == 0 && env->dr[reg] == env->eip) ||
1497 ((type & 1) && env->cpu_watchpoint[reg] &&
1498 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1499 dr6 |= 1 << reg;
1500 if (hw_breakpoint_enabled(env->dr[7], reg))
1501 hit_enabled = 1;
1504 if (hit_enabled || force_dr6_update)
1505 env->dr[6] = dr6;
1506 return hit_enabled;
1509 static CPUDebugExcpHandler *prev_debug_excp_handler;
1511 void raise_exception(int exception_index);
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
   Distinguishes CPU-originated (BP_CPU) hardware breakpoints/watchpoints,
   which must raise #DB inside the guest, from debugger-owned ones, which
   are handed on to the previously installed handler.
   NOTE: raise_exception() and cpu_resume_from_signal() do not return. */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* consume the hit before re-checking DR7 state */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* force DR6 update so the guest sees which slot hit */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* fall through to the handler we displaced (e.g. gdbstub) */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1539 /* This should come from sysemu.h - if we could include it here... */
1540 void qemu_system_reset_request(void);
/* Inject a machine-check event into bank 'bank' of CPU 'cenv'.
   'status'/'mcg_status'/'addr'/'misc' are the raw MCA register values.
   Uncorrected errors (MCI_STATUS_UC) raise CPU_INTERRUPT_MCE; corrected
   errors are only logged into the bank registers.  A UC error while a
   previous MCE is still in progress (MCG_STATUS_MCIP) or with CR4.MCE
   clear is treated as fatal and triggers a system reset. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;    /* low byte = number of banks */
    uint64_t *banks = cenv->mce_banks;     /* 4 registers per bank */

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;  /* banks[0..3] = CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested / masked MCE is architecturally a shutdown */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;     /* bank already held an event */
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: overwrite only if the bank is free or holds
           another corrected error */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;       /* can't overwrite a UC record */
}
1592 #endif /* !CONFIG_USER_ONLY */
1594 static void mce_init(CPUX86State *cenv)
1596 unsigned int bank, bank_num;
1598 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1599 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1600 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1601 cenv->mcg_ctl = ~(uint64_t)0;
1602 bank_num = MCE_BANKS_DEF;
1603 for (bank = 0; bank < bank_num; bank++)
1604 cenv->mce_banks[bank*4] = ~(uint64_t)0;
/* Execute the host CPUID instruction for leaf 'function'/sub-leaf 'count'
   and store the results through any non-NULL output pointer.  Only
   compiled in when KVM support is available; otherwise a no-op (outputs
   are left untouched). */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: EBX may be the PIC register, so save/restore everything
       with pusha/popa and write results through a pointer instead of
       using register output constraints. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1643 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1644 uint32_t *ecx, uint32_t *edx)
1646 *ebx = env->cpuid_vendor1;
1647 *edx = env->cpuid_vendor2;
1648 *ecx = env->cpuid_vendor3;
1650 /* sysenter isn't supported on compatibility mode on AMD, syscall
1651 * isn't supported in compatibility mode on Intel.
1652 * Normally we advertise the actual cpu vendor, but you can override
1653 * this if you want to use KVM's sysenter/syscall emulation
1654 * in compatibility mode and when doing cross vendor migration
1656 if (kvm_enabled() && env->cpuid_vendor_override) {
1657 host_cpuid(0, 0, NULL, ebx, ecx, edx);
/* Compute the guest-visible CPUID result for leaf 'index' (sub-leaf
   'count', used by leaf 4).  Values are built from the env->cpuid_*
   fields established at CPU registration plus hard-coded cache/SVM
   data.  All four output pointers must be non-NULL. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* extended leaves past xlevel fall back to a basic leaf,
           mirroring real hardware behaviour */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            /* logical processor count in EBX[23:16] */
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            /* EAX[31:26] = max cores sharing this cache - 1 */
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            /* ECX[7:0] = number of cores - 1 */
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* Read the GDT/LDT descriptor referenced by 'selector' via debugger
   memory accesses and decode its base, limit and flags.  Returns 1 on
   success, 0 if the selector is out of table bounds or the descriptor
   cannot be read. */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;   /* low / high 32 bits of the 8-byte descriptor */
    int index;

    if (selector & 0x4)        /* TI bit selects LDT vs GDT */
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;     /* descriptors are 8-byte aligned */
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    /* base is scattered over e1[31:16], e2[7:0] and e2[31:24] */
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        /* granularity bit: limit counted in 4K pages */
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
/* Allocate and initialise an x86 CPU state for the model named by
   'cpu_model'.  Process-global tables and the debug-exception hook are
   set up only once (guarded by 'inited').  Returns NULL if the model
   name is unknown.  Ownership of the returned state passes to the
   caller. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* chain our breakpoint handler in front of any existing one */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
1909 #if !defined(CONFIG_USER_ONLY)
/* Handle the INIT IPI: reset the CPU while preserving a pending SIPI
   request (cpu_reset clears interrupt_request), then apply the APIC
   INIT state. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}
/* Handle the SIPI (startup IPI) by delegating to the APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
1922 #else
/* User-mode emulation: INIT/SIPI IPIs do not exist, so these are no-ops. */
void do_cpu_init(CPUState *env)
{
}

void do_cpu_sipi(CPUState *env)
{
}
1929 #endif