Disable preadv/pwritev support
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob4785ff0d7dc6f60a766282962771d14168f9768b
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "exec-all.h"
28 #include "qemu-common.h"
29 #include "kvm.h"
31 #include "qemu-kvm.h"
33 //#define DEBUG_MMU
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
/* CPUID leaf 1, EDX: one name per bit, NULL where the bit is reserved
 * or deliberately unsupported. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID leaf 1, ECX: extended feature names, NULL for reserved bits. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
    "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, "x2apic", NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
/* CPUID leaf 0x80000001, EDX: AMD extended feature names; many bits mirror
 * the leaf-1 EDX flags on AMD parts. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
    "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID leaf 0x80000001, ECX: AMD extended feature names. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
    "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
    "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
63 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
64 uint32_t *ext_features,
65 uint32_t *ext2_features,
66 uint32_t *ext3_features)
68 int i;
69 int found = 0;
71 for ( i = 0 ; i < 32 ; i++ )
72 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
73 *features |= 1 << i;
74 found = 1;
76 for ( i = 0 ; i < 32 ; i++ )
77 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
78 *ext_features |= 1 << i;
79 found = 1;
81 for ( i = 0 ; i < 32 ; i++ )
82 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
83 *ext2_features |= 1 << i;
84 found = 1;
86 for ( i = 0 ; i < 32 ; i++ )
87 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
88 *ext3_features |= 1 << i;
89 found = 1;
91 if (!found) {
92 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* Static description of one selectable CPU model (built-in table entry or
 * the host-derived "host" model). */
typedef struct x86_def_t {
    const char *name;          /* model name matched against -cpu */
    uint32_t level;            /* maximum basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* vendor id words (EBX, EDX, ECX order) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;           /* maximum extended CPUID leaf */
    char model_id[48];         /* brand string (CPUID 80000002H..80000004H) */
    int vendor_override;       /* set when the user supplied an explicit vendor= */
} x86_def_t;
/* Baseline feature sets for the classic Intel models; each newer set builds
 * on the previous one.  CPUID_* bit masks come from cpu.h. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
120 static x86_def_t x86_defs[] = {
121 #ifdef TARGET_X86_64
123 .name = "qemu64",
124 .level = 2,
125 .vendor1 = CPUID_VENDOR_AMD_1,
126 .vendor2 = CPUID_VENDOR_AMD_2,
127 .vendor3 = CPUID_VENDOR_AMD_3,
128 .family = 6,
129 .model = 2,
130 .stepping = 3,
131 .features = PPRO_FEATURES |
132 /* these features are needed for Win64 and aren't fully implemented */
133 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
134 /* this feature is needed for Solaris and isn't fully implemented */
135 CPUID_PSE36,
136 .ext_features = CPUID_EXT_SSE3,
137 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
138 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
139 .ext3_features = CPUID_EXT3_SVM,
140 .xlevel = 0x8000000A,
141 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
144 .name = "phenom",
145 .level = 5,
146 .vendor1 = CPUID_VENDOR_AMD_1,
147 .vendor2 = CPUID_VENDOR_AMD_2,
148 .vendor3 = CPUID_VENDOR_AMD_3,
149 .family = 16,
150 .model = 2,
151 .stepping = 3,
152 /* Missing: CPUID_VME, CPUID_HT */
153 .features = PPRO_FEATURES |
154 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
155 CPUID_PSE36,
156 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
157 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
158 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
159 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
160 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
161 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
162 CPUID_EXT2_FFXSR,
163 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
164 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
165 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
166 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
167 .ext3_features = CPUID_EXT3_SVM,
168 .xlevel = 0x8000001A,
169 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
172 .name = "core2duo",
173 .level = 10,
174 .family = 6,
175 .model = 15,
176 .stepping = 11,
177 /* The original CPU also implements these features:
178 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
179 CPUID_TM, CPUID_PBE */
180 .features = PPRO_FEATURES |
181 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
182 CPUID_PSE36,
183 /* The original CPU also implements these ext features:
184 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
185 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
186 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
187 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
188 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
189 .xlevel = 0x80000008,
190 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
192 #endif
194 .name = "qemu32",
195 .level = 2,
196 .family = 6,
197 .model = 3,
198 .stepping = 3,
199 .features = PPRO_FEATURES,
200 .ext_features = CPUID_EXT_SSE3,
201 .xlevel = 0,
202 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
205 .name = "coreduo",
206 .level = 10,
207 .family = 6,
208 .model = 14,
209 .stepping = 8,
210 /* The original CPU also implements these features:
211 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
212 CPUID_TM, CPUID_PBE */
213 .features = PPRO_FEATURES | CPUID_VME |
214 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
215 /* The original CPU also implements these ext features:
216 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
217 CPUID_EXT_PDCM */
218 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
219 .ext2_features = CPUID_EXT2_NX,
220 .xlevel = 0x80000008,
221 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
224 .name = "486",
225 .level = 0,
226 .family = 4,
227 .model = 0,
228 .stepping = 0,
229 .features = I486_FEATURES,
230 .xlevel = 0,
233 .name = "pentium",
234 .level = 1,
235 .family = 5,
236 .model = 4,
237 .stepping = 3,
238 .features = PENTIUM_FEATURES,
239 .xlevel = 0,
242 .name = "pentium2",
243 .level = 2,
244 .family = 6,
245 .model = 5,
246 .stepping = 2,
247 .features = PENTIUM2_FEATURES,
248 .xlevel = 0,
251 .name = "pentium3",
252 .level = 2,
253 .family = 6,
254 .model = 7,
255 .stepping = 3,
256 .features = PENTIUM3_FEATURES,
257 .xlevel = 0,
260 .name = "athlon",
261 .level = 2,
262 .vendor1 = CPUID_VENDOR_AMD_1,
263 .vendor2 = CPUID_VENDOR_AMD_2,
264 .vendor3 = CPUID_VENDOR_AMD_3,
265 .family = 6,
266 .model = 2,
267 .stepping = 3,
268 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
269 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
270 .xlevel = 0x80000008,
271 /* XXX: put another string ? */
272 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
275 .name = "n270",
276 /* original is on level 10 */
277 .level = 5,
278 .family = 6,
279 .model = 28,
280 .stepping = 2,
281 .features = PPRO_FEATURES |
282 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
283 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
284 * CPUID_HT | CPUID_TM | CPUID_PBE */
285 /* Some CPUs got no CPUID_SEP */
286 .ext_features = CPUID_EXT_MONITOR |
287 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
288 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
289 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
290 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
291 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
292 .xlevel = 0x8000000A,
293 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
/* Execute the host CPUID instruction; defined later in this file. */
static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
/*
 * Read the host brand string from CPUID leaves 80000002H..80000004H into
 * @str.  @str must have room for 48 bytes (3 leaves x 16 bytes); the
 * hardware-provided string is NUL-padded within those 48 bytes.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        /* each leaf yields 16 brand-string bytes in register order */
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
315 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
317 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
319 x86_cpu_def->name = "host";
320 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
321 x86_cpu_def->level = eax;
322 x86_cpu_def->vendor1 = ebx;
323 x86_cpu_def->vendor2 = edx;
324 x86_cpu_def->vendor3 = ecx;
326 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
327 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
328 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
329 x86_cpu_def->stepping = eax & 0x0F;
330 x86_cpu_def->ext_features = ecx;
331 x86_cpu_def->features = edx;
333 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
334 x86_cpu_def->xlevel = eax;
336 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
337 x86_cpu_def->ext2_features = edx;
338 x86_cpu_def->ext3_features = ecx;
339 cpu_x86_fill_model_id(x86_cpu_def->model_id);
340 x86_cpu_def->vendor_override = 0;
342 return 0;
345 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
347 unsigned int i;
348 x86_def_t *def;
350 char *s = strdup(cpu_model);
351 char *featurestr, *name = strtok(s, ",");
352 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
353 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
354 int family = -1, model = -1, stepping = -1;
356 def = NULL;
357 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
358 if (strcmp(name, x86_defs[i].name) == 0) {
359 def = &x86_defs[i];
360 break;
363 if (kvm_enabled() && strcmp(name, "host") == 0) {
364 cpu_x86_fill_host(x86_cpu_def);
365 } else if (!def) {
366 goto error;
367 } else {
368 memcpy(x86_cpu_def, def, sizeof(*def));
371 add_flagname_to_bitmaps("hypervisor", &plus_features,
372 &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
374 featurestr = strtok(NULL, ",");
376 while (featurestr) {
377 char *val;
378 if (featurestr[0] == '+') {
379 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
380 } else if (featurestr[0] == '-') {
381 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
382 } else if ((val = strchr(featurestr, '='))) {
383 *val = 0; val++;
384 if (!strcmp(featurestr, "family")) {
385 char *err;
386 family = strtol(val, &err, 10);
387 if (!*val || *err || family < 0) {
388 fprintf(stderr, "bad numerical value %s\n", val);
389 goto error;
391 x86_cpu_def->family = family;
392 } else if (!strcmp(featurestr, "model")) {
393 char *err;
394 model = strtol(val, &err, 10);
395 if (!*val || *err || model < 0 || model > 0xff) {
396 fprintf(stderr, "bad numerical value %s\n", val);
397 goto error;
399 x86_cpu_def->model = model;
400 } else if (!strcmp(featurestr, "stepping")) {
401 char *err;
402 stepping = strtol(val, &err, 10);
403 if (!*val || *err || stepping < 0 || stepping > 0xf) {
404 fprintf(stderr, "bad numerical value %s\n", val);
405 goto error;
407 x86_cpu_def->stepping = stepping;
408 } else if (!strcmp(featurestr, "vendor")) {
409 if (strlen(val) != 12) {
410 fprintf(stderr, "vendor string must be 12 chars long\n");
411 goto error;
413 x86_cpu_def->vendor1 = 0;
414 x86_cpu_def->vendor2 = 0;
415 x86_cpu_def->vendor3 = 0;
416 for(i = 0; i < 4; i++) {
417 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
418 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
419 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
421 x86_cpu_def->vendor_override = 1;
422 } else if (!strcmp(featurestr, "model_id")) {
423 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
424 val);
425 } else {
426 fprintf(stderr, "unrecognized feature %s\n", featurestr);
427 goto error;
429 } else {
430 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
431 goto error;
433 featurestr = strtok(NULL, ",");
435 x86_cpu_def->features |= plus_features;
436 x86_cpu_def->ext_features |= plus_ext_features;
437 x86_cpu_def->ext2_features |= plus_ext2_features;
438 x86_cpu_def->ext3_features |= plus_ext3_features;
439 x86_cpu_def->features &= ~minus_features;
440 x86_cpu_def->ext_features &= ~minus_ext_features;
441 x86_cpu_def->ext2_features &= ~minus_ext2_features;
442 x86_cpu_def->ext3_features &= ~minus_ext3_features;
443 free(s);
444 return 0;
446 error:
447 free(s);
448 return -1;
451 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
453 unsigned int i;
455 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
456 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
459 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
461 x86_def_t def1, *def = &def1;
463 if (cpu_x86_find_by_name(def, cpu_model) < 0)
464 return -1;
465 if (def->vendor1) {
466 env->cpuid_vendor1 = def->vendor1;
467 env->cpuid_vendor2 = def->vendor2;
468 env->cpuid_vendor3 = def->vendor3;
469 } else {
470 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
471 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
472 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
474 env->cpuid_vendor_override = def->vendor_override;
475 env->cpuid_level = def->level;
476 if (def->family > 0x0f)
477 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
478 else
479 env->cpuid_version = def->family << 8;
480 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
481 env->cpuid_version |= def->stepping;
482 env->cpuid_features = def->features;
483 env->pat = 0x0007040600070406ULL;
484 env->cpuid_ext_features = def->ext_features;
485 env->cpuid_ext2_features = def->ext2_features;
486 env->cpuid_xlevel = def->xlevel;
487 env->cpuid_ext3_features = def->ext3_features;
489 const char *model_id = def->model_id;
490 int c, len, i;
491 if (!model_id)
492 model_id = "";
493 len = strlen(model_id);
494 for(i = 0; i < 48; i++) {
495 if (i >= len)
496 c = '\0';
497 else
498 c = (uint8_t)model_id[i];
499 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
502 return 0;
505 /* NOTE: must be called outside the CPU execute loop */
506 void cpu_reset(CPUX86State *env)
508 int i;
510 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
511 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
512 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
515 memset(env, 0, offsetof(CPUX86State, breakpoints));
517 tlb_flush(env, 1);
519 env->old_exception = -1;
521 /* init to reset state */
523 #ifdef CONFIG_SOFTMMU
524 env->hflags |= HF_SOFTMMU_MASK;
525 #endif
526 env->hflags2 |= HF2_GIF_MASK;
528 cpu_x86_update_cr0(env, 0x60000010);
529 env->a20_mask = ~0x0;
530 env->smbase = 0x30000;
532 env->idt.limit = 0xffff;
533 env->gdt.limit = 0xffff;
534 env->ldt.limit = 0xffff;
535 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
536 env->tr.limit = 0xffff;
537 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
539 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
540 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
541 DESC_R_MASK | DESC_A_MASK);
542 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
543 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
544 DESC_A_MASK);
545 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
546 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
547 DESC_A_MASK);
548 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
549 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
550 DESC_A_MASK);
551 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
552 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
553 DESC_A_MASK);
554 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
555 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
556 DESC_A_MASK);
558 env->eip = 0xfff0;
559 env->regs[R_EDX] = env->cpuid_version;
561 env->eflags = 0x2;
563 /* FPU init */
564 for(i = 0;i < 8; i++)
565 env->fptags[i] = 1;
566 env->fpuc = 0x37f;
568 env->mxcsr = 0x1f80;
570 memset(env->dr, 0, sizeof(env->dr));
571 env->dr[6] = DR6_FIXED_1;
572 env->dr[7] = DR7_FIXED_1;
573 cpu_breakpoint_remove_all(env, BP_CPU);
574 cpu_watchpoint_remove_all(env, BP_CPU);
577 void cpu_x86_close(CPUX86State *env)
579 qemu_free(env);
/***********************************************************/
/* x86 debug */

/* Printable names for the lazy condition-code operations, indexed by the
 * CC_OP_* enumeration (one name per enumerator, in order). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
640 static void
641 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
642 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
643 const char *name, struct SegmentCache *sc)
645 #ifdef TARGET_X86_64
646 if (env->hflags & HF_CS64_MASK) {
647 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
648 sc->selector, sc->base, sc->limit, sc->flags);
649 } else
650 #endif
652 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
653 (uint32_t)sc->base, sc->limit, sc->flags);
656 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
657 goto done;
659 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
660 if (sc->flags & DESC_S_MASK) {
661 if (sc->flags & DESC_CS_MASK) {
662 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
663 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
664 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
665 (sc->flags & DESC_R_MASK) ? 'R' : '-');
666 } else {
667 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
668 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
669 (sc->flags & DESC_W_MASK) ? 'W' : '-');
671 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
672 } else {
673 static const char *sys_type_name[2][16] = {
674 { /* 32 bit mode */
675 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
676 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
677 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
678 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
680 { /* 64 bit mode */
681 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
682 "Reserved", "Reserved", "Reserved", "Reserved",
683 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
684 "Reserved", "IntGate64", "TrapGate64"
687 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
688 [(sc->flags & DESC_TYPE_MASK)
689 >> DESC_TYPE_SHIFT]);
691 done:
692 cpu_fprintf(f, "\n");
695 void cpu_dump_state(CPUState *env, FILE *f,
696 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
697 int flags)
699 int eflags, i, nb;
700 char cc_op_name[32];
701 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
703 if (kvm_enabled())
704 kvm_arch_get_registers(env);
706 eflags = env->eflags;
707 #ifdef TARGET_X86_64
708 if (env->hflags & HF_CS64_MASK) {
709 cpu_fprintf(f,
710 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
711 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
712 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
713 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
714 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
715 env->regs[R_EAX],
716 env->regs[R_EBX],
717 env->regs[R_ECX],
718 env->regs[R_EDX],
719 env->regs[R_ESI],
720 env->regs[R_EDI],
721 env->regs[R_EBP],
722 env->regs[R_ESP],
723 env->regs[8],
724 env->regs[9],
725 env->regs[10],
726 env->regs[11],
727 env->regs[12],
728 env->regs[13],
729 env->regs[14],
730 env->regs[15],
731 env->eip, eflags,
732 eflags & DF_MASK ? 'D' : '-',
733 eflags & CC_O ? 'O' : '-',
734 eflags & CC_S ? 'S' : '-',
735 eflags & CC_Z ? 'Z' : '-',
736 eflags & CC_A ? 'A' : '-',
737 eflags & CC_P ? 'P' : '-',
738 eflags & CC_C ? 'C' : '-',
739 env->hflags & HF_CPL_MASK,
740 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
741 (int)(env->a20_mask >> 20) & 1,
742 (env->hflags >> HF_SMM_SHIFT) & 1,
743 env->halted);
744 } else
745 #endif
747 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
748 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
749 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
750 (uint32_t)env->regs[R_EAX],
751 (uint32_t)env->regs[R_EBX],
752 (uint32_t)env->regs[R_ECX],
753 (uint32_t)env->regs[R_EDX],
754 (uint32_t)env->regs[R_ESI],
755 (uint32_t)env->regs[R_EDI],
756 (uint32_t)env->regs[R_EBP],
757 (uint32_t)env->regs[R_ESP],
758 (uint32_t)env->eip, eflags,
759 eflags & DF_MASK ? 'D' : '-',
760 eflags & CC_O ? 'O' : '-',
761 eflags & CC_S ? 'S' : '-',
762 eflags & CC_Z ? 'Z' : '-',
763 eflags & CC_A ? 'A' : '-',
764 eflags & CC_P ? 'P' : '-',
765 eflags & CC_C ? 'C' : '-',
766 env->hflags & HF_CPL_MASK,
767 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
768 (int)(env->a20_mask >> 20) & 1,
769 (env->hflags >> HF_SMM_SHIFT) & 1,
770 env->halted);
773 for(i = 0; i < 6; i++) {
774 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
775 &env->segs[i]);
777 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
778 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
780 #ifdef TARGET_X86_64
781 if (env->hflags & HF_LMA_MASK) {
782 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
783 env->gdt.base, env->gdt.limit);
784 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
785 env->idt.base, env->idt.limit);
786 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
787 (uint32_t)env->cr[0],
788 env->cr[2],
789 env->cr[3],
790 (uint32_t)env->cr[4]);
791 for(i = 0; i < 4; i++)
792 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
793 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
794 env->dr[6], env->dr[7]);
795 } else
796 #endif
798 cpu_fprintf(f, "GDT= %08x %08x\n",
799 (uint32_t)env->gdt.base, env->gdt.limit);
800 cpu_fprintf(f, "IDT= %08x %08x\n",
801 (uint32_t)env->idt.base, env->idt.limit);
802 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
803 (uint32_t)env->cr[0],
804 (uint32_t)env->cr[2],
805 (uint32_t)env->cr[3],
806 (uint32_t)env->cr[4]);
807 for(i = 0; i < 4; i++)
808 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
809 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
811 if (flags & X86_DUMP_CCOP) {
812 if ((unsigned)env->cc_op < CC_OP_NB)
813 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
814 else
815 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
816 #ifdef TARGET_X86_64
817 if (env->hflags & HF_CS64_MASK) {
818 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
819 env->cc_src, env->cc_dst,
820 cc_op_name);
821 } else
822 #endif
824 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
825 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
826 cc_op_name);
829 if (flags & X86_DUMP_FPU) {
830 int fptag;
831 fptag = 0;
832 for(i = 0; i < 8; i++) {
833 fptag |= ((!env->fptags[i]) << i);
835 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
836 env->fpuc,
837 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
838 env->fpstt,
839 fptag,
840 env->mxcsr);
841 for(i=0;i<8;i++) {
842 #if defined(USE_X86LDOUBLE)
843 union {
844 long double d;
845 struct {
846 uint64_t lower;
847 uint16_t upper;
848 } l;
849 } tmp;
850 tmp.d = env->fpregs[i].d;
851 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
852 i, tmp.l.lower, tmp.l.upper);
853 #else
854 cpu_fprintf(f, "FPR%d=%016" PRIx64,
855 i, env->fpregs[i].mmx.q);
856 #endif
857 if ((i & 1) == 1)
858 cpu_fprintf(f, "\n");
859 else
860 cpu_fprintf(f, " ");
862 if (env->hflags & HF_CS64_MASK)
863 nb = 16;
864 else
865 nb = 8;
866 for(i=0;i<nb;i++) {
867 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
869 env->xmm_regs[i].XMM_L(3),
870 env->xmm_regs[i].XMM_L(2),
871 env->xmm_regs[i].XMM_L(1),
872 env->xmm_regs[i].XMM_L(0));
873 if ((i & 1) == 1)
874 cpu_fprintf(f, "\n");
875 else
876 cpu_fprintf(f, " ");
881 /***********************************************************/
882 /* x86 mmu */
883 /* XXX: add PGE support */
885 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
887 a20_state = (a20_state != 0);
888 if (a20_state != ((env->a20_mask >> 20) & 1)) {
889 #if defined(DEBUG_MMU)
890 printf("A20 update: a20=%d\n", a20_state);
891 #endif
892 /* if the cpu is currently executing code, we must unlink it and
893 all the potentially executing TB */
894 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
896 /* when a20 is changed, all the MMU mappings are invalid, so
897 we must flush everything */
898 tlb_flush(env, 1);
899 env->a20_mask = (~0x100000) | (a20_state << 20);
903 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
905 int pe_state;
907 #if defined(DEBUG_MMU)
908 printf("CR0 update: CR0=0x%08x\n", new_cr0);
909 #endif
910 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
911 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
912 tlb_flush(env, 1);
915 #ifdef TARGET_X86_64
916 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
917 (env->efer & MSR_EFER_LME)) {
918 /* enter in long mode */
919 /* XXX: generate an exception */
920 if (!(env->cr[4] & CR4_PAE_MASK))
921 return;
922 env->efer |= MSR_EFER_LMA;
923 env->hflags |= HF_LMA_MASK;
924 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
925 (env->efer & MSR_EFER_LMA)) {
926 /* exit long mode */
927 env->efer &= ~MSR_EFER_LMA;
928 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
929 env->eip &= 0xffffffff;
931 #endif
932 env->cr[0] = new_cr0 | CR0_ET_MASK;
934 /* update PE flag in hidden flags */
935 pe_state = (env->cr[0] & CR0_PE_MASK);
936 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
937 /* ensure that ADDSEG is always set in real mode */
938 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
939 /* update FPU flags */
940 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
941 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
944 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
945 the PDPT */
946 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
948 env->cr[3] = new_cr3;
949 if (env->cr[0] & CR0_PG_MASK) {
950 #if defined(DEBUG_MMU)
951 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
952 #endif
953 tlb_flush(env, 0);
957 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
959 #if defined(DEBUG_MMU)
960 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
961 #endif
962 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
963 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
964 tlb_flush(env, 1);
966 /* SSE handling */
967 if (!(env->cpuid_features & CPUID_SSE))
968 new_cr4 &= ~CR4_OSFXSR_MASK;
969 if (new_cr4 & CR4_OSFXSR_MASK)
970 env->hflags |= HF_OSFXSR_MASK;
971 else
972 env->hflags &= ~HF_OSFXSR_MASK;
974 env->cr[4] = new_cr4;
977 #if defined(CONFIG_USER_ONLY)
979 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
980 int is_write, int mmu_idx, int is_softmmu)
982 /* user mode only emulation */
983 is_write &= 1;
984 env->cr[2] = addr;
985 env->error_code = (is_write << PG_ERROR_W_BIT);
986 env->error_code |= PG_ERROR_U_MASK;
987 env->exception_index = EXCP0E_PAGE;
988 return 1;
991 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
993 return addr;
996 #else
998 /* XXX: This value should match the one returned by CPUID
999 * and in exec.c */
1000 #if defined(CONFIG_KQEMU)
1001 #define PHYS_ADDR_MASK 0xfffff000LL
1002 #else
1003 # if defined(TARGET_X86_64)
1004 # define PHYS_ADDR_MASK 0xfffffff000LL
1005 # else
1006 # define PHYS_ADDR_MASK 0xffffff000LL
1007 # endif
1008 #endif
/* Software page-table walker: resolve a guest-virtual access that missed
   the TLB, updating Accessed/Dirty bits and filling the TLB on success.

   is_write1 encodes the access type: 0 = read, 1 = write, 2 = ifetch.
   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    /* collapse the ifetch case (2) onto "not a write" for permission checks */
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping, everything allowed */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (3-level) or long-mode (4-level) walk with 64-bit entries.
           ptep accumulates the combined U/W/NX permissions of all levels;
           NX is XORed so that "permitted" is the 1 state at each level. */
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: non-canonical
               addresses raise #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries have no A bit or
               permission bits of their own */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* start fully permissive; lower levels restrict */
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            /* undo the XOR encoding: ptep's NX bit now means "no execute" */
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes honour R/W only when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        /* legacy 32-bit (2-level) walk with 32-bit entries; no NX */
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code: W/U/I-D bits plus P from the protect path */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions: SVM intercept
           delivers the faulting address via EXITINFO2 instead */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
/* Debugger-side address translation: walk the guest page tables for ADDR
   without side effects (no A/D bit updates, no TLB fill, no faults).
   Returns the guest-physical address, or -1 if the page is not mapped. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE / long-mode walk with 64-bit entries */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: non-canonical
               addresses cannot be translated */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT indexed by bits 31:30 */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 32-bit walk (or paging disabled) */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1400 void hw_breakpoint_insert(CPUState *env, int index)
1402 int type, err = 0;
1404 switch (hw_breakpoint_type(env->dr[7], index)) {
1405 case 0:
1406 if (hw_breakpoint_enabled(env->dr[7], index))
1407 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1408 &env->cpu_breakpoint[index]);
1409 break;
1410 case 1:
1411 type = BP_CPU | BP_MEM_WRITE;
1412 goto insert_wp;
1413 case 2:
1414 /* No support for I/O watchpoints yet */
1415 break;
1416 case 3:
1417 type = BP_CPU | BP_MEM_ACCESS;
1418 insert_wp:
1419 err = cpu_watchpoint_insert(env, env->dr[index],
1420 hw_breakpoint_len(env->dr[7], index),
1421 type, &env->cpu_watchpoint[index]);
1422 break;
1424 if (err)
1425 env->cpu_breakpoint[index] = NULL;
1428 void hw_breakpoint_remove(CPUState *env, int index)
1430 if (!env->cpu_breakpoint[index])
1431 return;
1432 switch (hw_breakpoint_type(env->dr[7], index)) {
1433 case 0:
1434 if (hw_breakpoint_enabled(env->dr[7], index))
1435 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1436 break;
1437 case 1:
1438 case 3:
1439 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1440 break;
1441 case 2:
1442 /* No support for I/O watchpoints yet */
1443 break;
1447 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1449 target_ulong dr6;
1450 int reg, type;
1451 int hit_enabled = 0;
1453 dr6 = env->dr[6] & ~0xf;
1454 for (reg = 0; reg < 4; reg++) {
1455 type = hw_breakpoint_type(env->dr[7], reg);
1456 if ((type == 0 && env->dr[reg] == env->eip) ||
1457 ((type & 1) && env->cpu_watchpoint[reg] &&
1458 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1459 dr6 |= 1 << reg;
1460 if (hw_breakpoint_enabled(env->dr[7], reg))
1461 hit_enabled = 1;
1464 if (hit_enabled || force_dr6_update)
1465 env->dr[6] = dr6;
1466 return hit_enabled;
1469 static CPUDebugExcpHandler *prev_debug_excp_handler;
1471 void raise_exception(int exception_index);
/* Debug-exception hook installed via cpu_set_debug_excp_handler(): decide
   whether a pending watchpoint/breakpoint belongs to the guest's hardware
   debug registers (raise #DB) or to the host debugger (defer to the
   previously installed handler). */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* guest DR-based watchpoint: consume it and raise #DB only
               if an enabled slot actually matched */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                /* spurious: restart the interrupted TB */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* guest execution breakpoint: update DR6 and raise #DB */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the handler that was installed before ours (e.g. gdbstub) */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1499 /* This should come from sysemu.h - if we could include it here... */
1500 void qemu_system_reset_request(void);
/* Inject a machine-check event into BANK of CENV, emulating the MCA
   architecture's status/addr/misc bank registers.  Under KVM the injection
   is delegated to the kernel.  Uncorrected errors (MCI_STATUS_UC) raise
   #MC via CPU_INTERRUPT_MCE; corrected errors are only logged into the
   bank.  A second UC error while one is in progress is a shutdown
   condition and triggers a system reset. */
void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff; /* low byte of MCG_CAP = bank count */
    uint64_t *banks = cenv->mce_banks;

    if (kvm_enabled()) {
        kvm_inject_x86_mce(cenv, bank, status, mcg_status, addr, misc);
        return;
    }

    /* ignore out-of-range banks and entries without the VAL bit */
    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank; /* each bank is 4 MSRs: CTL, STATUS, ADDR, MISC */
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            /* nested #MC, or CR4.MCE clear: architectural shutdown */
            fprintf(stderr, "injects mce exception while previous "
                    "one is in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER; /* bank already held an event */
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        /* corrected error: log it unless the bank holds a pending UC event */
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER; /* bank busy with a UC event: just flag overflow */
}
1557 #endif /* !CONFIG_USER_ONLY */
1559 static void mce_init(CPUX86State *cenv)
1561 unsigned int bank, bank_num;
1563 if (((cenv->cpuid_version >> 8)&0xf) >= 6
1564 && (cenv->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)) {
1565 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1566 cenv->mcg_ctl = ~(uint64_t)0;
1567 bank_num = cenv->mcg_cap & 0xff;
1568 cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
1569 for (bank = 0; bank < bank_num; bank++)
1570 cenv->mce_banks[bank*4] = ~(uint64_t)0;
/* Execute the CPUID instruction on the *host* CPU for leaf FUNCTION /
   subleaf COUNT and store the results through any non-NULL output pointers.
   NOTE(review): when neither CONFIG_KVM nor USE_KVM is defined the body is
   compiled out and the outputs are left untouched — callers appear to only
   invoke this under kvm_enabled(); confirm before adding new call sites. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: EBX may be the PIC register, so save/restore everything with
       pusha/popa and write the results to memory instead of using register
       output constraints */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* each output pointer is optional */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
/* Emulate the CPUID instruction for the guest: fill EAX/EBX/ECX/EDX for
   leaf INDEX / subleaf COUNT from the configured CPU model, with KVM-
   specific adjustments (host vendor passthrough, host feature filtering). */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* NOTE(review): out-of-range extended leaves fall back to
           cpuid_level (the *basic* max), matching upstream — verify this
           clamp is intentional before changing it */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual host CPU vendor, and say goodbye to
         * migration between different vendors if you use compatibility
         * mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf plus vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            if (!kvm_nested)
                *ecx &= ~CPUID_EXT3_SVM;
            /* 3dnow */
            *edx &= ~0xc0000000;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* Debugger helper: read the segment descriptor for SELECTOR from the guest
   GDT or LDT and decode its base, limit and flags.  Returns 1 on success,
   0 if the selector is outside the table limit or the descriptor memory
   cannot be read. */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    /* selector bit 2 (TI) chooses LDT vs GDT */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7; /* descriptors are 8 bytes; strip RPL+TI */
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    /* reassemble the base (bits scattered across both descriptor words)
       and the 20-bit limit */
    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff; /* granularity: limit is in 4K pages */
    *flags = e2;

    return 1;
}
/* Allocate and initialize a new x86 CPU instance for the model named by
   CPU_MODEL.  Returns NULL (after closing the half-built CPU) when the
   model name is unknown.  One-time global setup (flag-computation tables,
   debug exception hook) runs on the first call only. */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* chain our hardware-breakpoint-aware handler in front of the
           existing debug exception handler */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
    cpu_reset(env);
#ifdef CONFIG_KQEMU
    kqemu_init(env);
#endif

    return env;
}
#if !defined(CONFIG_USER_ONLY)
/* Handle an INIT IPI: reset the CPU but preserve a pending SIPI request so
   the subsequent startup IPI is not lost, then reset the local APIC. */
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

/* Handle a startup IPI by delegating to the local APIC. */
void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
/* user-mode emulation: INIT/SIPI have no meaning, provide no-op stubs */
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif