 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <sys/types.h>
#include <sys/ioctl.h>
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
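
/* Page lists shared with the kqemu kernel module: they are allocated in
   kqemu_init() and registered with the module, and the counters track how
   many entries are currently queued on the QEMU side. */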
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \

static int is_cpuid_supported(void)

static int is_cpuid_supported(void)

    asm volatile ("pushf\n"
                  "xorl $0x00200000, %0\n"
                  : "=a" (v0), "=d" (v1)
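
/* Keep the guest-visible CPUID feature bits that matter to user code
   (CMOV, CX8, MMX, SSE, SSE2, FXSR, SEP, SSE3, MONITOR) in sync with the
   host CPU, since kqemu runs guest user code natively on the host. */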
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
    cpuid(1, eax, ebx, ecx, edx);
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
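
/* Open the kqemu device, check that the module version matches, allocate
   the shared page lists and register them together with guest RAM through
   the KQEMU_INIT ioctl. On any failure the device is closed again and
   kqemu stays disabled. */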
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;

    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
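
/* Queue a single guest page for TLB flushing inside the kernel module; if
   the queue overflows, request a full flush instead (KQEMU_FLUSH_ALL). */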
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
void kqemu_flush(CPUState *env, int global)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
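
/* Tell the kernel module that the dirty-tracking state of a RAM page has
   changed; an overflowing queue degenerates into a full update
   (KQEMU_RAM_PAGES_UPDATE_ALL). */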
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
static void kqemu_reset_modified_ram_pages(void)
{
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
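
/* Record a modified RAM page; when the list fills up it is pushed to the
   kernel module via KQEMU_MODIFY_RAM_PAGES and then reset. */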
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
            kqemu_reset_modified_ram_pages();
    uint8_t fpregs1[8 * 10];

    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];

static struct fpxstate fpx1 __attribute__((aligned(16)));
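
/* Load the guest FPU/SSE state into the host FPU before KQEMU_EXEC
   (restore_native_fp_*) and read it back afterwards (save_native_fp_*).
   The fxsave/fxrstor variants are used when the guest CPUID advertises
   FXSR, the legacy fsave/frstor pair otherwise. */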
static void restore_native_fp_frstor(CPUState *env)
{
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for (i=7; i>=0; i--) {
        if (env->fptags[i]) {
            /* the FPU automatically computes it */
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
    asm volatile ("frstor %0" : "=m" (*fp));
static void save_native_fp_fsave(CPUState *env)
{
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    asm volatile ("fxrstor %0" : "=m" (*fp));
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
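
/* Emulate the SYSCALL instruction when the module returns KQEMU_RET_SYSCALL:
   load the kernel code/stack selectors derived from MSR_STAR and redirect
   EIP, handling both long mode (LMA) and legacy mode. */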
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;
        code64 = env->hflags & HF_CS64_MASK;
        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        env->eip = env->lstar;
        env->eip = env->cstar;
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;
        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    struct PCRecord *next;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
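
/* Hash the PC and increment (or create) its counter in pc_rec_hash. */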
static void kqemu_record_pc(unsigned long pc)
{
    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    r = malloc(sizeof(PCRecord));
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
    else if (r1->count == r2->count)
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
        pc_rec_hash[h] = NULL;
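
/* Write the recorded PC statistics (per-PC count and cumulative percentage)
   to /tmp/kqemu.stats, then reset the hash table. */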
void kqemu_record_dump(void)
{
    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
        perror("/tmp/kqemu.stats");
    fprintf(f, "total: %" PRId64 "\n", total);
    for(i = 0; i < nb_pc_records; i++) {
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    kqemu_record_flush();
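
/* Main entry point: copy the relevant CPUState fields into a
   kqemu_cpu_state, run the guest inside the kernel module (KQEMU_EXEC),
   copy the resulting state back and translate the module's return code
   into QEMU exceptions, interrupts or a switch back to the software
   emulator. */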
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
#ifdef CONFIG_PROFILER

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
->regs
, env
->regs
, sizeof(kenv
->regs
));
650 kenv
->eip
= env
->eip
;
651 kenv
->eflags
= env
->eflags
;
652 memcpy(&kenv
->segs
, &env
->segs
, sizeof(env
->segs
));
653 memcpy(&kenv
->ldt
, &env
->ldt
, sizeof(env
->ldt
));
654 memcpy(&kenv
->tr
, &env
->tr
, sizeof(env
->tr
));
655 memcpy(&kenv
->gdt
, &env
->gdt
, sizeof(env
->gdt
));
656 memcpy(&kenv
->idt
, &env
->idt
, sizeof(env
->idt
));
657 kenv
->cr0
= env
->cr
[0];
658 kenv
->cr2
= env
->cr
[2];
659 kenv
->cr3
= env
->cr
[3];
660 kenv
->cr4
= env
->cr
[4];
661 kenv
->a20_mask
= env
->a20_mask
;
662 #if KQEMU_VERSION >= 0x010100
663 kenv
->efer
= env
->efer
;
665 #if KQEMU_VERSION >= 0x010300
666 kenv
->tsc_offset
= 0;
667 kenv
->star
= env
->star
;
668 kenv
->sysenter_cs
= env
->sysenter_cs
;
669 kenv
->sysenter_esp
= env
->sysenter_esp
;
670 kenv
->sysenter_eip
= env
->sysenter_eip
;
672 kenv
->lstar
= env
->lstar
;
673 kenv
->cstar
= env
->cstar
;
674 kenv
->fmask
= env
->fmask
;
675 kenv
->kernelgsbase
= env
->kernelgsbase
;
678 if (env
->dr
[7] & 0xff) {
679 kenv
->dr7
= env
->dr
[7];
680 kenv
->dr0
= env
->dr
[0];
681 kenv
->dr1
= env
->dr
[1];
682 kenv
->dr2
= env
->dr
[2];
683 kenv
->dr3
= env
->dr
[3];
687 kenv
->dr6
= env
->dr
[6];
688 cpl
= (env
->hflags
& HF_CPL_MASK
);
690 kenv
->nb_pages_to_flush
= nb_pages_to_flush
;
691 #if KQEMU_VERSION >= 0x010200
692 kenv
->user_only
= (env
->kqemu_enabled
== 1);
693 kenv
->nb_ram_pages_to_update
= nb_ram_pages_to_update
;
695 nb_ram_pages_to_update
= 0;
697 #if KQEMU_VERSION >= 0x010300
698 kenv
->nb_modified_ram_pages
= nb_modified_ram_pages
;
700 kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
    env->kernelgsbase = kenv->kernelgsbase;
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
    for(i = 0; i < kenv->nb_pages_to_flush; i++) {
        tlb_flush_page(env, pages_to_flush[i]);
    nb_pages_to_flush = 0;
#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
    /* restore the hidden flags */
        unsigned int new_hflags;

        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        unsigned long pc = env->eip + env->segs[R_CS].base;
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
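
/* Interrupt a KQEMU_EXEC that is currently running; on Win32 this is done
   by cancelling the pending I/O request so the module returns. */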
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */