/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
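/* Each table above maps a CPUID feature-register bit position (0..31) to its
   Linux-style flag name: feature_name covers CPUID[1].EDX, ext_feature_name
   CPUID[1].ECX, ext2_feature_name CPUID[0x80000001].EDX and ext3_feature_name
   CPUID[0x80000001].ECX.  add_flagname_to_bitmaps() below walks all four
   tables to turn a "+flag"/"-flag" command-line name into the matching bit. */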
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
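/* These masks seed the .features fields of the CPU models below; each
   generation's mask builds on the previous one (PENTIUM_FEATURES includes
   I486_FEATURES, and so on). */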
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
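    /* CPUID leaf 1 EAX layout: stepping in bits 3:0, model in 7:4, family
       in 11:8, extended model in 19:16 and extended family in 27:20.  The
       effective family is base plus extended family; the extended model
       becomes the high nibble of the effective model. */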
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t numvalue;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
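    /* Re-encode family/model/stepping into the CPUID leaf 1 EAX format:
       families above 0x0f keep 0xf in the base field and put the excess in
       the extended-family field (bits 27:20); the high nibble of the model
       goes into the extended-model field (bits 19:16). */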
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
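    /* Pack the model string into the twelve 32-bit cpuid_model[] words that
       CPUID leaves 0x80000002..0x80000004 return: four characters per word,
       little-endian, NUL-padded out to the full 48 bytes. */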
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;
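    /* 0x60000010 is the architected CR0 value after RESET: CD and NW set,
       ET set, paging and protected mode disabled. */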
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

    env->mcg_status = 0;
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, (uint32_t)env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n",
                    (uint32_t)env->dr[6], (uint32_t)env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
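/* The A20 gate masks physical address bit 20 for 8086 compatibility, so
   that addresses wrap at 1MB when it is disabled.  env->a20_mask is ANDed
   into every physical address the page-table walkers below compute. */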
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
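            /* Accumulate access rights across the levels of the walk in
               ptep.  PG_NX_MASK is XORed at each level so that "execute
               allowed" becomes an active-high bit that can be ANDed
               together with the USER and RW bits; it is XORed back once
               the leaf entry has been folded in. */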
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
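/* The low four bits of DR6 report which of the four debug registers
   matched (bit N for breakpoint/watchpoint N).  Only a hit on an
   *enabled* breakpoint should raise #DB, but DR6 can also be refreshed
   without raising via force_dr6_update. */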
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !CONFIG_USER_ONLY */
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = MCE_BANKS_DEF;
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
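    /* On 32-bit hosts EBX may be reserved as the PIC base register, so it
       cannot appear in the asm constraint list; save and restore all
       registers with pusha/popa and write the CPUID results out through
       the pointer held in ESI instead. */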
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            *eax |= 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax |= 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax |= 0x0000143;
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in upstream QEMU */
            *ecx &= ~CPUID_EXT3_SVM;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;    /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;
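    /* Decode the two 32-bit descriptor words: base bits 15:0 come from
       e1[31:16], bits 23:16 from e2[7:0] and bits 31:24 from e2[31:24];
       the 20-bit limit is e1[15:0] | e2[19:16], counted in 4K pages when
       the G bit is set. */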
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}

void do_cpu_sipi(CPUState *env)
{
}
#endif