/*
 * KQEMU support
 *
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef HOST_SOLARIS
#include <sys/modctl.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG
//#define PROFILE

#include <unistd.h>
#include <fcntl.h>
#include "kqemu.h"
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
/* 0 = not allowed
   1 = user kqemu
   2 = kernel kqemu
*/
int kqemu_allowed = 1;

unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
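
/* These work queues are allocated in kqemu_init() and their addresses
   are passed to the kernel module through the KQEMU_INIT ioctl, so
   they are shared between QEMU and the module: QEMU queues TLB-flush
   and dirty-page work here, and the counts are handed over on each
   KQEMU_EXEC call. */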
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to get the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}
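
/* Initialization handshake with the kqemu kernel module: open the
   device, check that the module and QEMU agree on KQEMU_VERSION,
   allocate the shared work queues, then describe the guest RAM layout
   to the module via the KQEMU_INIT ioctl. Any failure just disables
   acceleration instead of aborting QEMU. */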
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
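
/* TLB flushes are not forwarded to the module immediately: they are
   queued in pages_to_flush[] and handed over on the next KQEMU_EXEC.
   When the queue overflows, the request degrades to a full flush
   (KQEMU_FLUSH_ALL). */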
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG)
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
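
/* Record a write to a RAM page that may contain translated code.
   modified_ram_pages_table[] deduplicates entries; when the queue is
   full it is flushed to the module with KQEMU_MODIFY_RAM_PAGES and
   reset. */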
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
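
/* Memory images of the x86 FPU save areas: struct fpstate matches the
   108-byte 32-bit protected-mode layout used by fsave/frstor, and
   struct fpxstate matches the 512-byte fxsave/fxrstor area, which the
   CPU requires to be 16-byte aligned (hence the aligned attribute on
   fpx1 below). */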
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
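
/* env->fptags[] only records whether a stack slot is empty, so the
   2-bit fsave tags are rebuilt as 11 (empty) or 00 (valid); marking
   every non-empty register as plain "valid" appears to be sufficient
   for frstor, which does not need the zero/special distinction. */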
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
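
/* fxsave/fxrstor use an abridged tag word with one bit per register
   (1 = non-empty), while env->fptags[] stores 1 for "empty"; that
   inversion is why both conversions below XOR the assembled byte
   with 0xff. */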
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
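
/* Emulate the SYSCALL instruction after the module returns
   KQEMU_RET_SYSCALL. As on real hardware, the new CS selector comes
   from bits 47..32 of MSR_STAR (SS is CS + 8); in long mode RCX/R11
   receive the return RIP/RFLAGS and the entry point is LSTAR or
   CSTAR, while legacy mode jumps to the low 32 bits of STAR. The
   value 2 is returned so that, like the KQEMU_RET_SOFTMMU case,
   execution resumes in the software emulator. */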
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
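
/* Optional profiler: counts, per guest PC, how often the module
   bailed out to the software MMU (see the KQEMU_RET_SOFTMMU case in
   kqemu_cpu_exec). Records live in a chained hash table and are
   dumped to /tmp/kqemu.stats, sorted by hit count, by
   kqemu_record_dump(). */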
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
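
/* Run guest code through the kernel module: marshal CPUState into a
   struct kqemu_cpu_state, load the guest FPU context into the host
   FPU, issue KQEMU_EXEC, copy the resulting state back, replay the
   TLB-flush and dirty-page work reported by the module, recompute the
   hidden flags, and finally dispatch on the module's return code
   (1 = interrupt or exception to deliver, 2 = continue in the
   software emulator, 0 = execution was interrupted). */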
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}

#endif