duplicate KVMState
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob49cefc3e308cdf4260de940ff04f40ad841d7d91
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "qemu-common.h"
30 #include "kvm.h"
32 #include "qemu-kvm.h"
34 //#define DEBUG_MMU
36 /* feature flags taken from "Intel Processor Identification and the CPUID
37 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
38 * about feature names, the Linux name is used. */
/* Names for CPUID[1].EDX feature bits 0..31; NULL marks bits with no
   user-selectable name (reserved or unexposed). */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* Names for CPUID[1].ECX feature bits 0..31. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
    "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, "x2apic", NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
/* Names for CPUID[0x80000001].EDX feature bits 0..31 (AMD extended
   features; the low bits mirror the standard CPUID[1].EDX names). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
    "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* Names for CPUID[0x80000001].ECX feature bits 0..31. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
    "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
    "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
64 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
65 uint32_t *ext_features,
66 uint32_t *ext2_features,
67 uint32_t *ext3_features)
69 int i;
70 int found = 0;
72 for ( i = 0 ; i < 32 ; i++ )
73 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
74 *features |= 1 << i;
75 found = 1;
77 for ( i = 0 ; i < 32 ; i++ )
78 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
79 *ext_features |= 1 << i;
80 found = 1;
82 for ( i = 0 ; i < 32 ; i++ )
83 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
84 *ext2_features |= 1 << i;
85 found = 1;
87 for ( i = 0 ; i < 32 ; i++ )
88 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
89 *ext3_features |= 1 << i;
90 found = 1;
92 if (!found) {
93 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* One CPU model definition, either a built-in entry of x86_defs[] or the
   result of parsing a "-cpu" command-line string. */
typedef struct x86_def_t {
    const char *name;                 /* model name, e.g. "qemu64" */
    uint32_t level;                   /* max standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* CPUID[0] EBX/EDX/ECX vendor id;
                                         0 means "use the default vendor" */
    int family;
    int model;
    int stepping;
    /* CPUID[1].EDX, CPUID[1].ECX, CPUID[0x80000001].EDX/ECX bit masks */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                  /* max extended CPUID leaf */
    char model_id[48];                /* brand string (CPUID 0x80000002..4) */
    int vendor_override;              /* non-zero: vendor set explicitly */
} x86_def_t;
/* Cumulative CPUID[1].EDX feature sets for the historical Intel models;
   each generation builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU models, selectable with "-cpu <name>".  The 64-bit-capable
   models are only compiled in for TARGET_X86_64 builds. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* low bits of PPRO_FEATURES are mirrored into the AMD extended
           feature word (mask keeps only the bits defined there) */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
299 static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
300 uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
/* Fill @str (at least 48 bytes) with the host CPU's brand string by
   querying CPUID leaves 0x80000002..0x80000004; each leaf yields 16 bytes
   in EAX/EBX/ECX/EDX order.  Always returns 0. */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax, ebx, ecx, edx;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
/* Populate @x86_cpu_def from the host CPU via CPUID, implementing
   "-cpu host": vendor from leaf 0, family/model/stepping and feature
   words from leaf 1 and the extended leaves.  Always returns 0. */
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    /* vendor string order in CPUID[0] is EBX, EDX, ECX */
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    /* decode family/model including the extended fields of leaf 1 EAX */
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
/* Parse @cpu_model, a string of the form
 *     name[,+feature][,-feature][,feature=value]...
 * and fill in @x86_cpu_def.  "name" selects a built-in model from
 * x86_defs[] (or "host" to copy the host CPU).  "+feat"/"-feat" force a
 * CPUID feature bit on/off; "family", "model", "stepping", "vendor" and
 * "model_id" override the corresponding fields.
 * Returns 0 on success, -1 on any parse error.
 * NOTE(review): uses strtok(), so this is not reentrant; fine for the
 * single-threaded startup path that calls it. */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def) {
        /* not a built-in model: only "host" is acceptable here */
        if (strcmp(name, "host") != 0) {
            goto error;
        }
        cpu_x86_fill_host(x86_cpu_def);
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    /* advertise that the guest runs under a hypervisor */
    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;   /* split "key=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack the 12-char vendor string into the three CPUID
                   registers, 4 little-endian bytes each */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* "+feat" wins over "-feat": plus bits are ORed in first, then minus
       bits are cleared */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
/* Print the name of every built-in CPU model to @f via @cpu_fprintf
   (used by "-cpu ?"). */
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
/* Resolve @cpu_model and copy the resulting definition into the CPUID
   fields of @env.  Returns 0 on success, -1 if the model string cannot
   be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* no vendor in the definition: default to GenuineIntel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    /* encode family/model/stepping into the CPUID[1].EAX version word,
       using the extended-family field for family > 15 */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;   /* power-on default PAT MSR value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string, NUL-padded to 48 bytes, into the twelve
           32-bit words returned by CPUID 0x80000002..0x80000004 */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
/* Reset @env to the architectural power-on state of an x86 CPU:
   real mode, CS:IP = F000:FFF0, FPU/SSE defaults, debug registers
   cleared.  CPUID fields and breakpoint lists survive the reset. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* zero everything up to (but not including) the breakpoint lists */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* 0x60000010 is the architectural reset value of CR0 */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* real-mode segment caches; CS starts at F000:FFF0 per the spec */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPUID signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all stack slots tagged empty, default control word */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
/* Destroy a CPU state object.  @env must not be executing. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
585 /***********************************************************/
586 /* x86 debug */
/* Printable names for the lazy condition-code operations; the order must
   stay in sync with the CC_OP_* enumeration in cpu.h (indexed by
   env->cc_op in cpu_dump_state). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
/* Print one cached segment descriptor (@sc, labelled @name) on a single
   line: selector, base, limit, raw flags, then a decoded DPL/type suffix
   when the CPU is in protected mode and the segment is present. */
static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    /* no decoded attributes in real mode or for a non-present segment */
    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        /* code/data segment: print size class and access bits */
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        /* system segment: decode the 4-bit type field */
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
/* Dump the full architectural state of @env to @f via @cpu_fprintf:
   general registers, flags, segments, control/debug registers, and —
   depending on @flags (X86_DUMP_CCOP / X86_DUMP_FPU) — the lazy
   condition codes and the FPU/SSE state.  When KVM is active the
   register state is first fetched from the kernel so the dump reflects
   the real guest state. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit register dump */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        /* 32-bit register dump */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        /* NOTE(review): env->dr is target_ulong; passing it to %08x
           without a cast relies on it being 32-bit here — confirm for
           64-bit targets running in legacy mode */
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        /* env->fptags stores 1 for "empty"; FTW uses 1 for "valid" */
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* pick apart the 80-bit long double into mantissa/exponent */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
884 /***********************************************************/
885 /* x86 mmu */
886 /* XXX: add PGE support */
/* Set the A20 gate state (@a20_state: 0 = masked, non-zero = enabled).
   A change invalidates all translations and TLB mappings, since the
   effective physical address of every page changes. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
/* Install @new_cr0 into CR0, flushing the TLB when paging/protection
   bits change, handling long-mode entry/exit (LME+PG transitions), and
   refreshing the derived hflags (PE, ADDSEG, MP/EM/TS). */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
/* Install @new_cr3 as the page-table base; flushes non-global TLB
   entries when paging is enabled (no flush is needed with paging off). */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
960 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
962 #if defined(DEBUG_MMU)
963 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
964 #endif
965 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
966 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
967 tlb_flush(env, 1);
969 /* SSE handling */
970 if (!(env->cpuid_features & CPUID_SSE))
971 new_cr4 &= ~CR4_OSFXSR_MASK;
972 if (new_cr4 & CR4_OSFXSR_MASK)
973 env->hflags |= HF_OSFXSR_MASK;
974 else
975 env->hflags &= ~HF_OSFXSR_MASK;
977 env->cr[4] = new_cr4;
980 #if defined(CONFIG_USER_ONLY)
/* User-mode-only MMU fault handler: there is no paging to emulate, so
   every fault is reported back as a guest #PF (return 1) with CR2 and a
   user-mode error code filled in. */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
/* User-mode build: virtual and physical addresses are identical. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
999 #else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical-address bits of a PAE/long-mode page-table
   entry (i.e. bits above the low 12 flag bits, up to the supported
   physical address width). */
#if defined(CONFIG_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
1013 /* return value:
1014 -1 = cannot handle fault
1015 0 = nothing more to do
1016 1 = generate PF fault
1017 2 = soft MMU activation required for this block
1019 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
1020 int is_write1, int mmu_idx, int is_softmmu)
1022 uint64_t ptep, pte;
1023 target_ulong pde_addr, pte_addr;
1024 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
1025 target_phys_addr_t paddr;
1026 uint32_t page_offset;
1027 target_ulong vaddr, virt_addr;
1029 is_user = mmu_idx == MMU_USER_IDX;
1030 #if defined(DEBUG_MMU)
1031 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
1032 addr, is_write1, is_user, env->eip);
1033 #endif
1034 is_write = is_write1 & 1;
1036 if (!(env->cr[0] & CR0_PG_MASK)) {
1037 pte = addr;
1038 virt_addr = addr & TARGET_PAGE_MASK;
1039 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
1040 page_size = 4096;
1041 goto do_mapping;
1044 if (env->cr[4] & CR4_PAE_MASK) {
1045 uint64_t pde, pdpe;
1046 target_ulong pdpe_addr;
1048 #ifdef TARGET_X86_64
1049 if (env->hflags & HF_LMA_MASK) {
1050 uint64_t pml4e_addr, pml4e;
1051 int32_t sext;
1053 /* test virtual address sign extension */
1054 sext = (int64_t)addr >> 47;
1055 if (sext != 0 && sext != -1) {
1056 env->error_code = 0;
1057 env->exception_index = EXCP0D_GPF;
1058 return 1;
1061 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1062 env->a20_mask;
1063 pml4e = ldq_phys(pml4e_addr);
1064 if (!(pml4e & PG_PRESENT_MASK)) {
1065 error_code = 0;
1066 goto do_fault;
1068 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1069 error_code = PG_ERROR_RSVD_MASK;
1070 goto do_fault;
1072 if (!(pml4e & PG_ACCESSED_MASK)) {
1073 pml4e |= PG_ACCESSED_MASK;
1074 stl_phys_notdirty(pml4e_addr, pml4e);
1076 ptep = pml4e ^ PG_NX_MASK;
1077 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1078 env->a20_mask;
1079 pdpe = ldq_phys(pdpe_addr);
1080 if (!(pdpe & PG_PRESENT_MASK)) {
1081 error_code = 0;
1082 goto do_fault;
1084 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1085 error_code = PG_ERROR_RSVD_MASK;
1086 goto do_fault;
1088 ptep &= pdpe ^ PG_NX_MASK;
1089 if (!(pdpe & PG_ACCESSED_MASK)) {
1090 pdpe |= PG_ACCESSED_MASK;
1091 stl_phys_notdirty(pdpe_addr, pdpe);
1093 } else
1094 #endif
1096 /* XXX: load them when cr3 is loaded ? */
1097 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1098 env->a20_mask;
1099 pdpe = ldq_phys(pdpe_addr);
1100 if (!(pdpe & PG_PRESENT_MASK)) {
1101 error_code = 0;
1102 goto do_fault;
1104 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1107 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1108 env->a20_mask;
1109 pde = ldq_phys(pde_addr);
1110 if (!(pde & PG_PRESENT_MASK)) {
1111 error_code = 0;
1112 goto do_fault;
1114 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1115 error_code = PG_ERROR_RSVD_MASK;
1116 goto do_fault;
1118 ptep &= pde ^ PG_NX_MASK;
1119 if (pde & PG_PSE_MASK) {
1120 /* 2 MB page */
1121 page_size = 2048 * 1024;
1122 ptep ^= PG_NX_MASK;
1123 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1124 goto do_fault_protect;
1125 if (is_user) {
1126 if (!(ptep & PG_USER_MASK))
1127 goto do_fault_protect;
1128 if (is_write && !(ptep & PG_RW_MASK))
1129 goto do_fault_protect;
1130 } else {
1131 if ((env->cr[0] & CR0_WP_MASK) &&
1132 is_write && !(ptep & PG_RW_MASK))
1133 goto do_fault_protect;
1135 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1136 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1137 pde |= PG_ACCESSED_MASK;
1138 if (is_dirty)
1139 pde |= PG_DIRTY_MASK;
1140 stl_phys_notdirty(pde_addr, pde);
1142 /* align to page_size */
1143 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1144 virt_addr = addr & ~(page_size - 1);
1145 } else {
1146 /* 4 KB page */
1147 if (!(pde & PG_ACCESSED_MASK)) {
1148 pde |= PG_ACCESSED_MASK;
1149 stl_phys_notdirty(pde_addr, pde);
1151 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1152 env->a20_mask;
1153 pte = ldq_phys(pte_addr);
1154 if (!(pte & PG_PRESENT_MASK)) {
1155 error_code = 0;
1156 goto do_fault;
1158 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1159 error_code = PG_ERROR_RSVD_MASK;
1160 goto do_fault;
1162 /* combine pde and pte nx, user and rw protections */
1163 ptep &= pte ^ PG_NX_MASK;
1164 ptep ^= PG_NX_MASK;
1165 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1166 goto do_fault_protect;
1167 if (is_user) {
1168 if (!(ptep & PG_USER_MASK))
1169 goto do_fault_protect;
1170 if (is_write && !(ptep & PG_RW_MASK))
1171 goto do_fault_protect;
1172 } else {
1173 if ((env->cr[0] & CR0_WP_MASK) &&
1174 is_write && !(ptep & PG_RW_MASK))
1175 goto do_fault_protect;
1177 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1178 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1179 pte |= PG_ACCESSED_MASK;
1180 if (is_dirty)
1181 pte |= PG_DIRTY_MASK;
1182 stl_phys_notdirty(pte_addr, pte);
1184 page_size = 4096;
1185 virt_addr = addr & ~0xfff;
1186 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1188 } else {
1189 uint32_t pde;
1191 /* page directory entry */
1192 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1193 env->a20_mask;
1194 pde = ldl_phys(pde_addr);
1195 if (!(pde & PG_PRESENT_MASK)) {
1196 error_code = 0;
1197 goto do_fault;
1199 /* if PSE bit is set, then we use a 4MB page */
1200 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1201 page_size = 4096 * 1024;
1202 if (is_user) {
1203 if (!(pde & PG_USER_MASK))
1204 goto do_fault_protect;
1205 if (is_write && !(pde & PG_RW_MASK))
1206 goto do_fault_protect;
1207 } else {
1208 if ((env->cr[0] & CR0_WP_MASK) &&
1209 is_write && !(pde & PG_RW_MASK))
1210 goto do_fault_protect;
1212 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1213 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1214 pde |= PG_ACCESSED_MASK;
1215 if (is_dirty)
1216 pde |= PG_DIRTY_MASK;
1217 stl_phys_notdirty(pde_addr, pde);
1220 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1221 ptep = pte;
1222 virt_addr = addr & ~(page_size - 1);
1223 } else {
1224 if (!(pde & PG_ACCESSED_MASK)) {
1225 pde |= PG_ACCESSED_MASK;
1226 stl_phys_notdirty(pde_addr, pde);
1229 /* page directory entry */
1230 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1231 env->a20_mask;
1232 pte = ldl_phys(pte_addr);
1233 if (!(pte & PG_PRESENT_MASK)) {
1234 error_code = 0;
1235 goto do_fault;
1237 /* combine pde and pte user and rw protections */
1238 ptep = pte & pde;
1239 if (is_user) {
1240 if (!(ptep & PG_USER_MASK))
1241 goto do_fault_protect;
1242 if (is_write && !(ptep & PG_RW_MASK))
1243 goto do_fault_protect;
1244 } else {
1245 if ((env->cr[0] & CR0_WP_MASK) &&
1246 is_write && !(ptep & PG_RW_MASK))
1247 goto do_fault_protect;
1249 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1250 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1251 pte |= PG_ACCESSED_MASK;
1252 if (is_dirty)
1253 pte |= PG_DIRTY_MASK;
1254 stl_phys_notdirty(pte_addr, pte);
1256 page_size = 4096;
1257 virt_addr = addr & ~0xfff;
1260 /* the page can be put in the TLB */
1261 prot = PAGE_READ;
1262 if (!(ptep & PG_NX_MASK))
1263 prot |= PAGE_EXEC;
1264 if (pte & PG_DIRTY_MASK) {
1265 /* only set write access if already dirty... otherwise wait
1266 for dirty access */
1267 if (is_user) {
1268 if (ptep & PG_RW_MASK)
1269 prot |= PAGE_WRITE;
1270 } else {
1271 if (!(env->cr[0] & CR0_WP_MASK) ||
1272 (ptep & PG_RW_MASK))
1273 prot |= PAGE_WRITE;
1276 do_mapping:
1277 pte = pte & env->a20_mask;
1279 /* Even if 4MB pages, we map only one 4KB page in the cache to
1280 avoid filling it too fast */
1281 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1282 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1283 vaddr = virt_addr + page_offset;
1285 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1286 return ret;
1287 do_fault_protect:
1288 error_code = PG_ERROR_P_MASK;
1289 do_fault:
1290 error_code |= (is_write << PG_ERROR_W_BIT);
1291 if (is_user)
1292 error_code |= PG_ERROR_U_MASK;
1293 if (is_write1 == 2 &&
1294 (env->efer & MSR_EFER_NXE) &&
1295 (env->cr[4] & CR4_PAE_MASK))
1296 error_code |= PG_ERROR_I_D_MASK;
1297 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1298 /* cr2 is not modified in case of exceptions */
1299 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1300 addr);
1301 } else {
1302 env->cr[2] = addr;
1304 env->error_code = error_code;
1305 env->exception_index = EXCP0E_PAGE;
1306 return 1;
/* Translate a guest virtual address to a physical address for the
   debugger (gdbstub/monitor).  Read-only walk: no permission checks,
   no A/D updates, no faults raised.  Returns -1 if not mapped. */
1309 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1311 target_ulong pde_addr, pte_addr;
1312 uint64_t pte;
1313 target_phys_addr_t paddr;
1314 uint32_t page_offset;
1315 int page_size;
1317 if (env->cr[4] & CR4_PAE_MASK) {
1318 target_ulong pdpe_addr;
1319 uint64_t pde, pdpe;
1321 #ifdef TARGET_X86_64
1322 if (env->hflags & HF_LMA_MASK) {
1323 uint64_t pml4e_addr, pml4e;
1324 int32_t sext;
1326 /* test virtual address sign extension */
1327 sext = (int64_t)addr >> 47;
/* non-canonical: not translatable */
1328 if (sext != 0 && sext != -1)
1329 return -1;
1331 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1332 env->a20_mask;
1333 pml4e = ldq_phys(pml4e_addr);
1334 if (!(pml4e & PG_PRESENT_MASK))
1335 return -1;
1337 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1338 env->a20_mask;
1339 pdpe = ldq_phys(pdpe_addr);
1340 if (!(pdpe & PG_PRESENT_MASK))
1341 return -1;
1342 } else
1343 #endif
/* 32-bit PAE: 4-entry PDPT selected by address bits 31:30 */
1345 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1346 env->a20_mask;
1347 pdpe = ldq_phys(pdpe_addr);
1348 if (!(pdpe & PG_PRESENT_MASK))
1349 return -1;
1352 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1353 env->a20_mask;
1354 pde = ldq_phys(pde_addr);
1355 if (!(pde & PG_PRESENT_MASK)) {
1356 return -1;
1358 if (pde & PG_PSE_MASK) {
1359 /* 2 MB page */
1360 page_size = 2048 * 1024;
1361 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1362 } else {
1363 /* 4 KB page */
1364 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1365 env->a20_mask;
1366 page_size = 4096;
1367 pte = ldq_phys(pte_addr);
1369 if (!(pte & PG_PRESENT_MASK))
1370 return -1;
1371 } else {
1372 uint32_t pde;
/* paging disabled: virtual == physical */
1374 if (!(env->cr[0] & CR0_PG_MASK)) {
1375 pte = addr;
1376 page_size = 4096;
1377 } else {
1378 /* page directory entry */
1379 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1380 pde = ldl_phys(pde_addr);
1381 if (!(pde & PG_PRESENT_MASK))
1382 return -1;
1383 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1384 pte = pde & ~0x003ff000; /* align to 4MB */
1385 page_size = 4096 * 1024;
1386 } else {
1387 /* page directory entry */
1388 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1389 pte = ldl_phys(pte_addr);
1390 if (!(pte & PG_PRESENT_MASK))
1391 return -1;
1392 page_size = 4096;
1395 pte = pte & env->a20_mask;
1398 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1399 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1400 return paddr;
1403 void hw_breakpoint_insert(CPUState *env, int index)
1405 int type, err = 0;
1407 switch (hw_breakpoint_type(env->dr[7], index)) {
1408 case 0:
1409 if (hw_breakpoint_enabled(env->dr[7], index))
1410 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1411 &env->cpu_breakpoint[index]);
1412 break;
1413 case 1:
1414 type = BP_CPU | BP_MEM_WRITE;
1415 goto insert_wp;
1416 case 2:
1417 /* No support for I/O watchpoints yet */
1418 break;
1419 case 3:
1420 type = BP_CPU | BP_MEM_ACCESS;
1421 insert_wp:
1422 err = cpu_watchpoint_insert(env, env->dr[index],
1423 hw_breakpoint_len(env->dr[7], index),
1424 type, &env->cpu_watchpoint[index]);
1425 break;
1427 if (err)
1428 env->cpu_breakpoint[index] = NULL;
1431 void hw_breakpoint_remove(CPUState *env, int index)
1433 if (!env->cpu_breakpoint[index])
1434 return;
1435 switch (hw_breakpoint_type(env->dr[7], index)) {
1436 case 0:
1437 if (hw_breakpoint_enabled(env->dr[7], index))
1438 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1439 break;
1440 case 1:
1441 case 3:
1442 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1443 break;
1444 case 2:
1445 /* No support for I/O watchpoints yet */
1446 break;
1450 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1452 target_ulong dr6;
1453 int reg, type;
1454 int hit_enabled = 0;
1456 dr6 = env->dr[6] & ~0xf;
1457 for (reg = 0; reg < 4; reg++) {
1458 type = hw_breakpoint_type(env->dr[7], reg);
1459 if ((type == 0 && env->dr[reg] == env->eip) ||
1460 ((type & 1) && env->cpu_watchpoint[reg] &&
1461 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1462 dr6 |= 1 << reg;
1463 if (hw_breakpoint_enabled(env->dr[7], reg))
1464 hit_enabled = 1;
1467 if (hit_enabled || force_dr6_update)
1468 env->dr[6] = dr6;
1469 return hit_enabled;
/* Debug-exception handler that was installed before ours; chained to at
   the end of breakpoint_handler(). */
1472 static CPUDebugExcpHandler *prev_debug_excp_handler;
/* forward declaration; defined in the op helpers */
1474 void raise_exception(int exception_index);
/* Debug-exception hook installed via cpu_set_debug_excp_handler().
   Converts QEMU-internal breakpoint/watchpoint hits that belong to the
   guest's debug registers (BP_CPU) into a guest #DB exception; anything
   else is passed on to the previously installed handler. */
1476 static void breakpoint_handler(CPUState *env)
1478 CPUBreakpoint *bp;
1480 if (env->watchpoint_hit) {
1481 if (env->watchpoint_hit->flags & BP_CPU) {
/* consume the hit before deciding; DR6 is updated by the check */
1482 env->watchpoint_hit = NULL;
1483 if (check_hw_breakpoints(env, 0))
1484 raise_exception(EXCP01_DB);
1485 else
/* not an enabled guest watchpoint after all: restart the insn */
1486 cpu_resume_from_signal(env, NULL);
1488 } else {
1489 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1490 if (bp->pc == env->eip) {
1491 if (bp->flags & BP_CPU) {
/* force DR6 update so the guest sees which breakpoint fired */
1492 check_hw_breakpoints(env, 1);
1493 raise_exception(EXCP01_DB);
1495 break;
/* chain to the handler that was registered before ours (e.g. gdbstub) */
1498 if (prev_debug_excp_handler)
1499 prev_debug_excp_handler(env);
1502 /* This should come from sysemu.h - if we could include it here... */
1503 void qemu_system_reset_request(void);
/* Inject a machine-check event into bank 'bank' of CPU 'cenv'.
   status/mcg_status/addr/misc are the raw MCA register values to latch.
   Silently ignored when the bank is out of range, the VAL bit is clear,
   or uncorrected-error reporting is disabled via MCG_CTL/MCi_CTL.
   An uncorrected error while a previous MC is still in progress (MCIP
   set) or with CR4.MCE clear triggers a system reset (shutdown). */
1505 void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
1506 uint64_t mcg_status, uint64_t addr, uint64_t misc)
1508 uint64_t mcg_cap = cenv->mcg_cap;
1509 unsigned bank_num = mcg_cap & 0xff;
1510 uint64_t *banks = cenv->mce_banks;
1512 if (bank >= bank_num || !(status & MCI_STATUS_VAL))
1513 return;
1516 * if MSR_MCG_CTL is not all 1s, the uncorrected error
1517 * reporting is disabled
1519 if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1520 cenv->mcg_ctl != ~(uint64_t)0)
1521 return;
/* each bank occupies 4 slots: CTL, STATUS, ADDR, MISC */
1522 banks += 4 * bank;
1524 * if MSR_MCi_CTL is not all 1s, the uncorrected error
1525 * reporting is disabled for the bank
1527 if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
1528 return;
1529 if (status & MCI_STATUS_UC) {
1530 if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
1531 !(cenv->cr[4] & CR4_MCE_MASK)) {
1532 fprintf(stderr, "injects mce exception while previous "
1533 "one is in progress!\n");
1534 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1535 qemu_system_reset_request();
1536 return;
/* overwrite of a still-valid status sets the overflow bit */
1538 if (banks[1] & MCI_STATUS_VAL)
1539 status |= MCI_STATUS_OVER;
1540 banks[2] = addr;
1541 banks[3] = misc;
1542 cenv->mcg_status = mcg_status;
1543 banks[1] = status;
1544 cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
/* corrected error: only latch if the bank is free or holds a
   corrected (non-UC) record; otherwise just flag overflow */
1545 } else if (!(banks[1] & MCI_STATUS_VAL)
1546 || !(banks[1] & MCI_STATUS_UC)) {
1547 if (banks[1] & MCI_STATUS_VAL)
1548 status |= MCI_STATUS_OVER;
1549 banks[2] = addr;
1550 banks[3] = misc;
1551 banks[1] = status;
1552 } else
1553 banks[1] |= MCI_STATUS_OVER;
1555 #endif /* !CONFIG_USER_ONLY */
1557 static void mce_init(CPUX86State *cenv)
1559 unsigned int bank, bank_num;
1561 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1562 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1563 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1564 cenv->mcg_ctl = ~(uint64_t)0;
1565 bank_num = cenv->mcg_cap & 0xff;
1566 cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
1567 for (bank = 0; bank < bank_num; bank++)
1568 cenv->mce_banks[bank*4] = ~(uint64_t)0;
/* Execute the CPUID instruction on the *host* with the given leaf and
   sub-leaf, storing EAX/EBX/ECX/EDX through the non-NULL out pointers.
   NOTE(review): on builds without CONFIG_KVM/USE_KVM this is a no-op and
   the outputs are left untouched — callers in this file only use it
   behind kvm_enabled(); verify before adding new callers. */
1572 static void host_cpuid(uint32_t function, uint32_t count,
1573 uint32_t *eax, uint32_t *ebx,
1574 uint32_t *ecx, uint32_t *edx)
1576 #if defined(CONFIG_KVM) || defined(USE_KVM)
1577 uint32_t vec[4];
1579 #ifdef __x86_64__
1580 asm volatile("cpuid"
1581 : "=a"(vec[0]), "=b"(vec[1]),
1582 "=c"(vec[2]), "=d"(vec[3])
1583 : "0"(function), "c"(count) : "cc");
1584 #else
/* 32-bit: EBX may be the PIC register, so save all GPRs with pusha and
   move the results out through a pointer instead of register outputs */
1585 asm volatile("pusha \n\t"
1586 "cpuid \n\t"
1587 "mov %%eax, 0(%2) \n\t"
1588 "mov %%ebx, 4(%2) \n\t"
1589 "mov %%ecx, 8(%2) \n\t"
1590 "mov %%edx, 12(%2) \n\t"
1591 "popa"
1592 : : "a"(function), "c"(count), "S"(vec)
1593 : "memory", "cc");
1594 #endif
1596 if (eax)
1597 *eax = vec[0];
1598 if (ebx)
1599 *ebx = vec[1];
1600 if (ecx)
1601 *ecx = vec[2];
1602 if (edx)
1603 *edx = vec[3];
1604 #endif
/* Emulate the CPUID instruction for the guest: fill EAX/EBX/ECX/EDX for
   leaf 'index' / sub-leaf 'count' from the configured CPU model, with
   KVM-specific adjustments (vendor passthrough, masking of features the
   host or KVM cannot provide). */
1607 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1608 uint32_t *eax, uint32_t *ebx,
1609 uint32_t *ecx, uint32_t *edx)
1611 /* test if maximum index reached */
/* out-of-range leaves are redirected to the highest basic leaf, as real
   hardware does */
1612 if (index & 0x80000000) {
1613 if (index > env->cpuid_xlevel)
1614 index = env->cpuid_level;
1615 } else {
1616 if (index > env->cpuid_level)
1617 index = env->cpuid_level;
1620 switch(index) {
1621 case 0:
1622 *eax = env->cpuid_level;
1623 *ebx = env->cpuid_vendor1;
1624 *edx = env->cpuid_vendor2;
1625 *ecx = env->cpuid_vendor3;
1627 /* sysenter isn't supported on compatibility mode on AMD. and syscall
1628 * isn't supported in compatibility mode on Intel. so advertise the
1629 * actual cpu, and say goodbye to migration between different vendors
1630 * if you use compatibility mode. */
1631 if (kvm_enabled() && !env->cpuid_vendor_override)
1632 host_cpuid(0, 0, NULL, ebx, ecx, edx)
1633 break;
1634 case 1:
1635 *eax = env->cpuid_version;
1636 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1637 *ecx = env->cpuid_ext_features;
1638 *edx = env->cpuid_features;
1639 break;
1640 case 2:
1641 /* cache info: needed for Pentium Pro compatibility */
1642 *eax = 1;
1643 *ebx = 0;
1644 *ecx = 0;
1645 *edx = 0x2c307d;
1646 break;
1647 case 4:
1648 /* cache info: needed for Core compatibility */
1649 switch (count) {
1650 case 0: /* L1 dcache info */
1651 *eax = 0x0000121;
1652 *ebx = 0x1c0003f;
1653 *ecx = 0x000003f;
1654 *edx = 0x0000001;
1655 break;
1656 case 1: /* L1 icache info */
1657 *eax = 0x0000122;
1658 *ebx = 0x1c0003f;
1659 *ecx = 0x000003f;
1660 *edx = 0x0000001;
1661 break;
1662 case 2: /* L2 cache info */
1663 *eax = 0x0000143;
1664 *ebx = 0x3c0003f;
1665 *ecx = 0x0000fff;
1666 *edx = 0x0000001;
1667 break;
1668 default: /* end of info */
1669 *eax = 0;
1670 *ebx = 0;
1671 *ecx = 0;
1672 *edx = 0;
1673 break;
1675 break;
1676 case 5:
1677 /* mwait info: needed for Core compatibility */
1678 *eax = 0; /* Smallest monitor-line size in bytes */
1679 *ebx = 0; /* Largest monitor-line size in bytes */
1680 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1681 *edx = 0;
1682 break;
1683 case 6:
1684 /* Thermal and Power Leaf */
1685 *eax = 0;
1686 *ebx = 0;
1687 *ecx = 0;
1688 *edx = 0;
1689 break;
1690 case 9:
1691 /* Direct Cache Access Information Leaf */
1692 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1693 *ebx = 0;
1694 *ecx = 0;
1695 *edx = 0;
1696 break;
1697 case 0xA:
1698 /* Architectural Performance Monitoring Leaf */
1699 *eax = 0;
1700 *ebx = 0;
1701 *ecx = 0;
1702 *edx = 0;
1703 break;
1704 case 0x80000000:
1705 *eax = env->cpuid_xlevel;
1706 *ebx = env->cpuid_vendor1;
1707 *edx = env->cpuid_vendor2;
1708 *ecx = env->cpuid_vendor3;
1709 break;
1710 case 0x80000001:
1711 *eax = env->cpuid_version;
1712 *ebx = 0;
1713 *ecx = env->cpuid_ext3_features;
1714 *edx = env->cpuid_ext2_features;
/* under KVM, never advertise features the host CPU or KVM itself
   cannot deliver to the guest */
1716 if (kvm_enabled()) {
1717 uint32_t h_eax, h_edx;
1719 host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
1721 /* disable CPU features that the host does not support */
1723 /* long mode */
1724 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1725 *edx &= ~0x20000000;
1726 /* syscall */
1727 if ((h_edx & 0x00000800) == 0)
1728 *edx &= ~0x00000800;
1729 /* nx */
1730 if ((h_edx & 0x00100000) == 0)
1731 *edx &= ~0x00100000;
1733 /* disable CPU features that KVM cannot support */
1735 /* svm */
1736 if (!kvm_nested)
1737 *ecx &= ~CPUID_EXT3_SVM;
1738 /* 3dnow */
1739 *edx &= ~0xc0000000;
1740 } else {
1741 /* AMD 3DNow! is not supported in QEMU */
1742 *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
1744 break;
1745 case 0x80000002:
1746 case 0x80000003:
1747 case 0x80000004:
/* processor brand string: 4 registers x 3 leaves = 48 bytes */
1748 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1749 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1750 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1751 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1752 break;
1753 case 0x80000005:
1754 /* cache info (L1 cache) */
1755 *eax = 0x01ff01ff;
1756 *ebx = 0x01ff01ff;
1757 *ecx = 0x40020140;
1758 *edx = 0x40020140;
1759 break;
1760 case 0x80000006:
1761 /* cache info (L2 cache) */
1762 *eax = 0;
1763 *ebx = 0x42004200;
1764 *ecx = 0x02008140;
1765 *edx = 0;
1766 break;
1767 case 0x80000008:
1768 /* virtual & phys address size in low 2 bytes. */
1769 /* XXX: This value must match the one used in the MMU code. */
1770 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1771 /* 64 bit processor */
1772 #if defined(CONFIG_KQEMU)
1773 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1774 #else
1775 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1776 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1777 #endif
1778 } else {
1779 #if defined(CONFIG_KQEMU)
1780 *eax = 0x00000020; /* 32 bits physical */
1781 #else
1782 if (env->cpuid_features & CPUID_PSE36)
1783 *eax = 0x00000024; /* 36 bits physical */
1784 else
1785 *eax = 0x00000020; /* 32 bits physical */
1786 #endif
1788 *ebx = 0;
1789 *ecx = 0;
1790 *edx = 0;
1791 break;
1792 case 0x8000000A:
1793 *eax = 0x00000001; /* SVM Revision */
1794 *ebx = 0x00000010; /* nr of ASIDs */
1795 *ecx = 0;
1796 *edx = 0; /* optional features */
1797 break;
1798 default:
1799 /* reserved values: zero */
1800 *eax = 0;
1801 *ebx = 0;
1802 *ecx = 0;
1803 *edx = 0;
1804 break;
/* Allocate and initialize an x86 CPU for model string 'cpu_model'.
   One-time global setup (flag-optimization tables, debug-exception hook)
   is performed on the first call only.  Returns NULL if the model name
   is unknown. */
1808 CPUX86State *cpu_x86_init(const char *cpu_model)
1810 CPUX86State *env;
1811 static int inited;
1813 env = qemu_mallocz(sizeof(CPUX86State));
1814 cpu_exec_init(env);
1815 env->cpu_model_str = cpu_model;
1817 /* init various static tables */
1818 if (!inited) {
1819 inited = 1;
1820 optimize_flags_init();
1821 #ifndef CONFIG_USER_ONLY
/* hook guest debug-register breakpoints; keep the previous handler so
   breakpoint_handler() can chain to it */
1822 prev_debug_excp_handler =
1823 cpu_set_debug_excp_handler(breakpoint_handler);
1824 #endif
1826 if (cpu_x86_register(env, cpu_model) < 0) {
1827 cpu_x86_close(env);
1828 return NULL;
1830 mce_init(env);
1831 cpu_reset(env);
1832 #ifdef CONFIG_KQEMU
1833 kqemu_init(env);
1834 #endif
1836 return env;
1839 #if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: full CPU reset, but the pending-SIPI flag must
   survive so a following SIPI can start the CPU, and the local APIC is
   only INIT-reset, not fully cleared. */
1840 void do_cpu_init(CPUState *env)
1842 int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
1843 cpu_reset(env);
1844 env->interrupt_request = sipi;
1845 apic_init_reset(env);
/* Handle a SIPI: delegate to the APIC to set the start vector. */
1848 void do_cpu_sipi(CPUState *env)
1850 apic_sipi(env);
1852 #else
/* user-mode emulation: INIT/SIPI have no meaning, empty stubs */
1853 void do_cpu_init(CPUState *env)
1856 void do_cpu_sipi(CPUState *env)
1859 #endif