/*
 *  KQEMU support
 *
 *  Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

#ifdef USE_KQEMU

#define DEBUG
//#define PROFILE

#include <unistd.h>
#include <fcntl.h>
#include "kqemu.h"

/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
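/* a count above KQEMU_MAX_RAM_PAGES_TO_UPDATE acts as an "update
   everything" sentinel (see kqemu_set_notdirty below) */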
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif

/* 0 = not allowed
   1 = user kqemu
   2 = kernel kqemu
*/
int kqemu_allowed = 1;
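/* adjusted from the command line (-no-kqemu forces 0, -kernel-kqemu
   selects 2); the option parsing itself lives outside this file */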
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
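/* on 32-bit hosts, CPUID availability is detected by toggling the ID
   bit (bit 21) of EFLAGS: CPUs without CPUID (i486 and earlier) keep
   that bit fixed. */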
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}

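/* open the kqemu device, verify that the kernel module and this
   binary agree on KQEMU_VERSION, then pass the guest RAM layout and
   the shared notification buffers to the module via KQEMU_INIT. */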
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#if defined(DEBUG)
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}

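/* record that the guest modified a RAM page, presumably so the kernel
   module can invalidate whatever it cached for it; pages are batched
   and, when the buffer fills, pushed immediately through the
   KQEMU_MODIFY_RAM_PAGES ioctl and the batch reset. */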
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}

struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};
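
/* struct fpxstate matches the 512-byte FXSAVE/FXRSTOR memory image;
   the CPU requires that image to be 16-byte aligned, hence the
   aligned attribute on fpx1 below. */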
static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
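    /* FSAVE's tag word keeps 2 bits per register and 11b means "empty";
       only the empty case is encoded here since, as noted below, the
       FPU recomputes the remaining tag encodings itself. */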
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
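    /* the FXSAVE format uses an abbreviated tag: one bit per register,
       1 = valid, 0 = empty; env->fptags stores 1 for "empty", hence
       the XOR with 0xff below. */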
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
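    /* SYSCALL takes the kernel CS selector from bits 47:32 of the
       STAR MSR; the kernel SS selector is architecturally CS + 8,
       which is why (selector + 8) is loaded below. */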
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}

#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)
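
/* chained hash table of guest PCs, used to build a histogram of the
   places where execution bails out to the software MMU
   (kqemu_record_pc is called from the KQEMU_RET_SOFTMMU path). */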
typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
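
/* Run guest code inside the kqemu kernel module. The return value is
   consumed by the caller (cpu_exec); from the handling below:
   0 = interrupted by the host, 1 = a guest interrupt/exception has
   been queued in env, 2 = run the next block with the software
   translator instead. */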
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
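    /* CR0.MP, CR0.EM and CR0.TS are consecutive bits starting at bit 1,
       so shifting CR0 left by (HF_MP_SHIFT - 1) lines them up with the
       consecutive HF_MP/HF_EM/HF_TS hflag positions in one operation. */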
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing
       the current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}

#endif