/*
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
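/* Note on the three arrays above: they are batching queues shared with
   the kqemu module (handed over in kqemu_init below): pages to drop from
   the TLB, RAM pages whose dirty state must be re-synchronized, and RAM
   pages whose translated code must be invalidated. The first two
   degenerate to an "update everything" marker on overflow; the third is
   flushed to the module when it fills up. */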
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
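/* The i386 test above relies on EFLAGS bit 21 (the ID flag): a CPU that
   implements CPUID lets software toggle that bit, so a value that reads
   back changed after the XOR means CPUID is available. */

#if 0
/* Usage sketch for the helpers above (hypothetical, never built): query
   CPUID leaf 1 and test a feature bit before trusting a fast path. */
static int example_host_has_sse2(void)
{
    uint32_t eax, ebx, ecx, edx;

    if (!is_cpuid_supported())
        return 0;
    cpuid(1, eax, ebx, ecx, edx);
    return (edx & CPUID_SSE2) != 0;
}
#endif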
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    if (!is_cpuid_supported()) {
        features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in compatibility
       mode, so for best performance it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}
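/* Worked example of the masking above: if the emulated CPU model
   advertises CPUID_SSE2 but the host lacks it, the bit is cleared in
   env->cpuid_features; bits outside critical_features_mask always keep
   the emulated model's values. */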
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
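#if 0
/* Minimal sketch of a call site (hypothetical; the real caller lives in
   the machine initialization code): try to enable kqemu for a freshly
   created CPU and silently fall back to the dynamic translator when the
   module is absent or mismatched. */
static void example_enable_kqemu(CPUState *env)
{
    if (kqemu_init(env) < 0) {
        /* env->kqemu_enabled stays unset: everything runs through the
           software emulator instead. */
    }
}
#endif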
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
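/* Overflow behaviour worth noting: once KQEMU_MAX_PAGES_TO_FLUSH
   addresses have been queued, the queue is replaced by the single
   KQEMU_FLUSH_ALL marker and the module performs a full TLB flush on the
   next KQEMU_EXEC instead of individual invalidations. */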
void kqemu_flush(CPUState *env, int global)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
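/* Background for the 0xff test above: in this generation of QEMU,
   phys_ram_dirty[] keeps one byte of per-client dirty flags per target
   page, and 0xff means "dirty for every client". Only pages leaving that
   fully-dirty state need to be reported to the kqemu module. */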
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush the queue to the kqemu module */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
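/* Batching example (illustrative numbers): with
   KQEMU_MAX_MODIFIED_RAM_PAGES = 512, the 512th distinct page modified
   since the last flush triggers one KQEMU_MODIFY_RAM_PAGES call for the
   whole batch, after which the lookup table and counter are reset. */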
/* Layouts below follow the architectural FSAVE (108-byte) and FXSAVE
   (512-byte) memory images; padding member names are local. */
struct fpstate {
    uint16_t fpuc, dummy1;
    uint16_t fpus, dummy2;
    uint16_t fptag, dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel, dummy0;
    uint32_t fpudp;
    uint16_t ds_sel, dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
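/* Note on the two save/restore flavours above: FSAVE/FRSTOR use a 2-bit
   tag per x87 register (11b = empty), while FXSAVE/FXRSTOR abridge the
   tag to one bit per register with the opposite polarity (1 = valid),
   which is why the fxsave/fxrstor paths XOR the tag byte with 0xff. */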
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
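/* STAR MSR layout assumed by do_syscall() (architectural): bits 47:32
   hold the kernel CS selector installed by SYSCALL (SS is implicitly
   CS + 8) and bits 31:0 the legacy-mode entry EIP; in long mode the
   entry point comes from LSTAR (64-bit callers) or CSTAR (compatibility
   mode) instead. */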
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    /* walk the collision chain; bump the counter if the PC is known */
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
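/* Hash example (illustrative): with PC_REC_HASH_BITS = 16,
   pc = 0x12345678 gives h = 0x12345678 ^ 0x1234 = 0x1234444c, and the
   low 16 bits select bucket 0x444c, spreading nearby EIPs across the
   64K buckets. */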
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    /* sort in decreasing hit count order */
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %lld\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %lld %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif /* CONFIG_PROFILER */
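/* Each stats line gives a PC, its hit count, its share of all recorded
   hits, and the running cumulative share; e.g. (illustrative output)
   "c0105320: 1042 12.50% 12.50%". */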
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
->regs
, env
->regs
, sizeof(kenv
->regs
));
641 kenv
->eip
= env
->eip
;
642 kenv
->eflags
= env
->eflags
;
643 memcpy(&kenv
->segs
, &env
->segs
, sizeof(env
->segs
));
644 memcpy(&kenv
->ldt
, &env
->ldt
, sizeof(env
->ldt
));
645 memcpy(&kenv
->tr
, &env
->tr
, sizeof(env
->tr
));
646 memcpy(&kenv
->gdt
, &env
->gdt
, sizeof(env
->gdt
));
647 memcpy(&kenv
->idt
, &env
->idt
, sizeof(env
->idt
));
648 kenv
->cr0
= env
->cr
[0];
649 kenv
->cr2
= env
->cr
[2];
650 kenv
->cr3
= env
->cr
[3];
651 kenv
->cr4
= env
->cr
[4];
652 kenv
->a20_mask
= env
->a20_mask
;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;
#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);
#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;
#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
#endif
#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif
#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef __x86_64__
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   the translator. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
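    /* The recomputation above mirrors the hflags bookkeeping that the
       segment and control-register helpers normally maintain: CS/SS
       operand-size bits, long mode, the "addseg" fast path and the FPU
       trap bits all have to be rebuilt from the state returned by the
       kqemu module. */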
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
== KQEMU_RET_SYSCALL
) {
825 /* syscall instruction */
826 return do_syscall(env
, kenv
);
828 if ((ret
& 0xff00) == KQEMU_RET_INT
) {
829 env
->exception_index
= ret
& 0xff;
831 env
->exception_is_int
= 1;
832 env
->exception_next_eip
= kenv
->next_eip
;
833 #ifdef CONFIG_PROFILER
834 kqemu_ret_int_count
++;
837 if (loglevel
& CPU_LOG_INT
) {
838 fprintf(logfile
, "kqemu: interrupt v=%02x:\n",
839 env
->exception_index
);
840 cpu_dump_state(env
, logfile
, fprintf
, 0);
844 } else if ((ret
& 0xff00) == KQEMU_RET_EXCEPTION
) {
845 env
->exception_index
= ret
& 0xff;
846 env
->error_code
= kenv
->error_code
;
847 env
->exception_is_int
= 0;
848 env
->exception_next_eip
= 0;
849 #ifdef CONFIG_PROFILER
850 kqemu_ret_excp_count
++;
853 if (loglevel
& CPU_LOG_INT
) {
854 fprintf(logfile
, "kqemu: exception v=%02x e=%04x:\n",
855 env
->exception_index
, env
->error_code
);
856 cpu_dump_state(env
, logfile
, fprintf
, 0);
860 } else if (ret
== KQEMU_RET_INTR
) {
861 #ifdef CONFIG_PROFILER
862 kqemu_ret_intr_count
++;
865 if (loglevel
& CPU_LOG_INT
) {
866 cpu_dump_state(env
, logfile
, fprintf
, 0);
870 } else if (ret
== KQEMU_RET_SOFTMMU
) {
871 #ifdef CONFIG_PROFILER
873 unsigned long pc
= env
->eip
+ env
->segs
[R_CS
].base
;
878 if (loglevel
& CPU_LOG_INT
) {
879 cpu_dump_state(env
, logfile
, fprintf
, 0);
884 cpu_dump_state(env
, stderr
, fprintf
, 0);
885 fprintf(stderr
, "Unsupported return value: 0x%x\n", ret
);
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully */
    CancelIo(kqemu_fd);
#endif
}