/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}

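/*
 * Example: for "-cpu qemu64,+pni,-nx" the caller tokenizes the feature
 * string and hands "pni" and "nx" to the function above; "pni" matches
 * index 0 of ext_feature_name (CPUID.1:ECX bit 0, i.e. SSE3) and "nx"
 * index 20 of ext2_feature_name (CPUID.80000001:EDX bit 20), so the bit
 * positions in the tables are exactly the CPUID bit positions.
 */
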
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 4,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .level = 5,
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .family = 15,
        .model = 6,
        .stepping = 1,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
#endif
    {
        .name = "qemu32",
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs have no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};

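/*
 * The definitions above are what "-cpu ?" lists via x86_cpu_list() and what
 * cpu_x86_find_by_name() below resolves, e.g. "-cpu phenom" or
 * "-cpu core2duo"; each template can then be tweaked with the
 * +feature/-feature/key=value syntax parsed further down.
 */
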
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}

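/*
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
 * processor brand string in EAX/EBX/ECX/EDX order, so the three iterations
 * above fill str[0..47]; callers must pass a buffer of at least 48 bytes
 * (model_id[48] in x86_def_t).
 */
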
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}

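/*
 * Worked example for the leaf 1 decoding above: EAX = 0x000106A5 (a Nehalem
 * part) yields family = 6 + 0 = 6, model = 0xA | (1 << 4) = 0x1A and
 * stepping = 5, i.e. the extended family/model fields are folded into the
 * base values the same way Linux reports them in /proc/cpuinfo.
 */
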
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t numvalue;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (kvm_enabled() && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

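/*
 * A sketch of the grammar accepted above:
 *   name[,(+feature|-feature|key=value)]...
 * e.g. cpu_x86_find_by_name(&def, "qemu64,+pni,-nx,family=15") starts from
 * the "qemu64" template, sets SSE3, clears NX and overrides the family.
 * "+"/"-" use the feature-name tables; family/model/stepping/level/xlevel
 * take numbers, vendor a 12-character string, model_id free text.
 */
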
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
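    /*
     * e.g. the "phenom" definition (family 16, model 2, stepping 3) encodes
     * to 0xf00 | ((16 - 0xf) << 20) | (2 << 4) | 3 = 0x00100F23: base family
     * 0xf plus extended family 1, matching a real family-10h leaf 1 EAX.
     */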
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

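/*
 * Note on the values above: the standard x86 reset fetch address is
 * CS.base 0xffff0000 plus EIP 0xfff0, so the first instruction is fetched
 * from 0xfffffff0 while the CS selector still reads 0xf000, just as on
 * real hardware.
 */
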
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                    [(sc->flags & DESC_TYPE_MASK)
                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

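/*
 * a20_mask is ~0x100000 with bit 20 optionally set back in: every physical
 * address computed by the MMU code is ANDed with it, so with A20 disabled
 * e.g. 0x100000 wraps to 0, reproducing the classic 8086-compatible
 * address-line behaviour.
 */
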
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

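/*
 * The shift trick at the end of cpu_x86_update_cr0() assumes HF_MP/HF_EM/
 * HF_TS occupy three consecutive hflags bits in the same order as
 * CR0.MP/EM/TS (CR0 bits 1..3), so a single left shift by HF_MP_SHIFT - 1
 * moves all three flags at once.
 */
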
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/

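/* Three page-table formats are handled below: the 4-level IA-32e walk
   (PML4E/PDPE/PDE/PTE) when LMA is set, the 3-level PAE walk otherwise
   when CR4.PAE is set, and the classic 2-level 32-bit walk as fallback. */
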
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

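/* The error code built above follows the architectural #PF layout:
   bit 0 P (protection violation vs. not-present), bit 1 W/R, bit 2 U/S,
   bit 3 RSVD, and bit 4 I/D for instruction fetches with NX enabled. */
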
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

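/*
 * The debug-register helpers below decode DR7 via the hw_breakpoint_*
 * macros: for breakpoint n, bits 2n/2n+1 are the local/global enables and
 * the R/W field at bits 16 + 4n gives the type used in the switches
 * (0 execute, 1 data write, 2 I/O, 3 data read/write).
 */
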
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "injecting MCE exception while a previous "
                    "one is still in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
#endif /* !CONFIG_USER_ONLY */

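/*
 * Each MCE bank is modelled as four consecutive uint64_t values mirroring
 * the MSR block MCi_CTL/MCi_STATUS/MCi_ADDR/MCi_MISC, which is why
 * cpu_inject_x86_mce() above indexes banks[0..3] after advancing by
 * 4 * bank and mce_init() below sets every CTL word to all 1s.
 */
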
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = cenv->mcg_cap & 0xff;
        cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

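/*
 * The 32-bit variant above goes through pusha/popa and a memory operand
 * rather than an "=b" constraint because EBX is the PIC register on i386
 * and may not be clobbered in inline asm in position-independent builds.
 */
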
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
            case 0: /* L1 dcache info */
                *eax |= 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax |= 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax |= 0x0000143;
                if (env->nr_threads > 1) {
                    *eax |= (env->nr_threads - 1) << 14;
                }
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
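    /* Decoding the L1 dcache numbers above per the leaf 4 layout:
       EBX 0x1c0003f = 8 ways (bits 31:22 + 1), 1 partition, 64-byte lines
       (bits 11:0 + 1); ECX 0x3f = 64 sets, so 8 * 64 * 64 = a 32 KB cache. */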
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (env->nr_cores * env->nr_threads > 1 &&
            env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
            env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
            env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
            *ecx |= 1 << 1;    /* CmpLegacy bit */
        }

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

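/*
 * e1/e2 above are the two 32-bit halves of an 8-byte segment descriptor:
 * the base is stitched together from e1[31:16], e2[7:0] and e2[31:24], and
 * the 20-bit limit from e1[15:0] plus e2[19:16], scaled by 4K when the
 * granularity (G) bit is set.
 */
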
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
    cpu_reset(env);

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif