/*
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/ioccom.h>
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
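
/* These fallbacks let the file build against older kqemu module
   headers: when kqemu.h already defines the constants, the #ifndef
   guards above keep the header's values instead of ours. */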

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
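
/* From here on, kqemu_fd/kqemu_closefd hide the difference between
   the Win32 HANDLE returned by CreateFile() and the POSIX descriptor
   returned by open(), so the rest of the file needs only one logical
   open/ioctl/close path per operation. */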

int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
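
/* The three arrays above are handed to the kqemu kernel module at
   init time (see struct kqemu_init below) and act as shared batch
   buffers: the nb_* counters say how many entries are valid, and the
   *_ALL sentinels request a full flush/update instead of an
   incremental one. */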

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1));
    return (v0 != v1);
}
#endif
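
/* The 32-bit check uses the classic EFLAGS.ID idiom: bit 21
   (0x00200000) is copied, toggled and written back through the stack.
   On a CPU with CPUID support the bit is writable, so the reloaded
   value (v0) differs from the saved copy (v1); on older CPUs the bit
   reads back unchanged. */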

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;

    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}

int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }

    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;

    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
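
/* Initialization protocol, in order: open the device, check that the
   module's KQEMU_GET_VERSION matches the KQEMU_VERSION this binary
   was built against, allocate the shared batch buffers, then pass
   their addresses (plus the RAM layout) to the module via KQEMU_INIT.
   Any failure lands on the fail: label, which closes the device and
   leaves kqemu_fd at KQEMU_INVALID_FD so callers fall back to plain
   emulation. */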

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
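
/* TLB invalidations are thus batched: instead of trapping into the
   module for every page, addresses pile up in pages_to_flush[] and
   are passed along with the next KQEMU_EXEC; on overflow the batch
   degrades to one full flush via the KQEMU_FLUSH_ALL sentinel. */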

void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
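
/* Per the test above, a page is queued only when its dirty byte is
   exactly 0xff (fully dirty): pages in any other state are already
   being tracked, so only the fully-dirty transition needs to be
   forwarded to the module. Like the flush list, this list overflows
   to a sentinel (KQEMU_RAM_PAGES_UPDATE_ALL) rather than dropping
   entries. */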

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}

void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
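
/* Unlike the flush and dirty lists, modified-page notifications may
   not be able to wait for the next KQEMU_EXEC: when the buffer fills
   up, the module is told synchronously via KQEMU_MODIFY_RAM_PAGES and
   the local table is cleared by kqemu_reset_modified_ram_pages(). */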

    uint8_t fpregs1[8 * 10];

    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];

static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
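
/* The copy loop above rotates the register file by fpstt so the
   in-memory FSAVE image is ordered from ST(0) upwards, and it moves
   10 bytes per register because x87 registers are 80-bit extended
   doubles. */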

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
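
/* These helpers come in two flavours because FXSAVE/FXRSTOR (used
   when the host reports CPUID_FXSR) lay the state out differently
   from the legacy FSAVE/FRSTOR image: 16-byte slots per x87 register
   instead of 10, a one-bit-per-register tag word (hence the ^ 0xff
   conversions), and room for MXCSR and the XMM registers.
   kqemu_cpu_exec() below selects the matching pair. */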

static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
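
/* Selector layout, for reference: bits 47..32 of MSR_STAR hold the
   kernel CS selector loaded above, with SS conventionally at CS + 8.
   In 64-bit mode the entry point comes from LSTAR (or CSTAR for a
   compatibility-mode caller) and EFLAGS is masked with SFMASK
   (env->fmask); the legacy path instead jumps to the low 32 bits of
   STAR. */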

#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
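
/* Bucket selection folds the PC by PC_REC_SIZE, then mixes the high
   hash bits into the low ones with the xor-shift before masking down
   to PC_REC_HASH_SIZE buckets; collisions are chained via ->next. */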

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
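
/* Note the inverted comparison: with this comparator qsort() sorts by
   descending count, so the hottest PCs come first in the dump. */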

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif

int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;
#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
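
/* KQEMU_EXEC is the main round trip: the full guest CPU state plus
   the pending flush/update counters go down in kcpu_state, the module
   runs guest code until it hits something it cannot handle, and the
   same structure comes back with updated state and a KQEMU_RET_*
   code that is decoded at the end of this function. */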

    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef __x86_64__
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
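
    /* kqemu returns only the architectural segment state, so the
       derived hflags bits (CS/SS operand size, long mode, ADDSEG)
       recomputed above must be refreshed before the software emulator
       uses them again. */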
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
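
/* Summary of the return codes produced above: 1 means an interrupt or
   exception was queued in env, 2 (also returned through do_syscall)
   asks the caller to emulate the current instruction in software, and
   0 simply hands control back to the emulation loop. */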

void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and to return successfully */
    CancelIo(kqemu_fd);
#endif
}