/*
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#ifdef HOST_SOLARIS
#include <sys/modctl.h>
#endif
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
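
/* Page lists shared with the kqemu kernel module. They are filled on
   the QEMU side and handed over in batches (at KQEMU_EXEC time or via
   dedicated ioctls), so most pages cost no extra kernel round trip. */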
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
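
/* CPUID always exists on x86_64; on i386, probe for it by checking
   whether the EFLAGS.ID bit (bit 21) can be toggled. */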
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
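
/* Fold the host CPUID bits selected by the masks below into the guest
   CPUID state: code running natively under kqemu sees the host's
   behaviour for these features, so the guest must advertise them
   consistently. */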
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;
    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
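
/* Open the kqemu device, check the module version, allocate the shared
   page lists and describe the guest RAM layout to the module with a
   single KQEMU_INIT ioctl. Returns 0 on success, -1 on failure. */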
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;
#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n",
                KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
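
/* Queue one page for TLB flushing in the kernel module; degrade to a
   full flush when the batch overflows. */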
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
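
/* Request a full TLB flush on the next KQEMU_EXEC. */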
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
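
/* Record that a RAM page leaves the fully dirty state, so that the
   kernel module can update its dirty tracking for that page. */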
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
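
/* Clear the modified-page list and its lookup table. */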
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
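
/* Record a write to a RAM page; if the batch fills up before the next
   KQEMU_EXEC, it is flushed to the module immediately through the
   KQEMU_MODIFY_RAM_PAGES ioctl. */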
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
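
/* Host FPU frame layouts: fpstate is the 108-byte FSAVE image and
   fpxstate the 512-byte FXSAVE image, which must be 16-byte aligned. */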
struct fpstate {
    uint16_t fpuc, dummy1;
    uint16_t fpus, dummy2;
    uint16_t fptag, dummy3;
    uint32_t fpip, fpcs, fpoo, fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc, fpus, fptag, fop;
    uint32_t fpuip;
    uint16_t cs_sel, dummy0;
    uint32_t fpudp;
    uint16_t ds_sel, dummy1;
    uint32_t mxcsr, mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
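
/* Load the guest x87 state into the host FPU with FRSTOR. The register
   file is rotated by fpstt so that fpregs1[0] receives ST(0). */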
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
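
/* Save the host FPU state back into CPUState with FSAVE, then reload a
   control word with default exception masks but the guest's rounding
   mode, since QEMU's own FP code relies on that state. */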
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
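
/* FXSAVE-format variant of restore_native_fp_frstor: also transfers
   MXCSR and the XMM registers when the guest CPUID exposes SSE. */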
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
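
/* FXSAVE-format variant of save_native_fp_fsave: also retrieves MXCSR
   and the XMM registers, then reinitializes the host FPU. */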
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
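
/* The kernel module exits to user space instead of executing SYSCALL
   natively; emulate its effect on the virtual CPU state here (see the
   KQEMU_RET_SYSCALL case in kqemu_cpu_exec). */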
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
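
/* Optional profiler: count, per guest PC, how often execution had to
   fall back to the soft MMU, and dump the statistics to
   /tmp/kqemu.stats. */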
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
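#endif

/* Execute guest code natively through the kernel module: marshal the
   CPU state into a kqemu_cpu_state, issue KQEMU_EXEC, copy the state
   back and translate the module's return code for cpu_exec(). */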
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;
#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);
#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;
#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif
#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif
#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
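
/* Force a pending KQEMU_EXEC to return so that the main loop can
   process an interrupt request. */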
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully */
    CancelIo(kqemu_fd);
#endif
}