/*
 *  KQEMU support
 *
 *  Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef HOST_SOLARIS
#include <sys/ioccom.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "kqemu.h"
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
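/* the Windows path goes through the NT device namespace: the escaped
   C string "\\\\.\\kqemu" denotes \\.\kqemu, while POSIX hosts use an
   ordinary device node */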
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    /* CPUID exists iff the ID flag (EFLAGS bit 21) can be toggled */
    asm volatile ("pushf\n popl %0\n movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n popf\n pushf\n popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so to get the best performance it is
       better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}
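/* e.g. CPUID_SSE sits in critical_features_mask: if the host lacks SSE,
   the corresponding guest CPUID bit is cleared too, so guest user code
   never issues SSE instructions the host cannot execute natively */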
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
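/* TLB flush requests are batched: page addresses accumulate in
   pages_to_flush[] and are handed to the kernel module on the next
   KQEMU_EXEC; once the buffer would overflow, the KQEMU_FLUSH_ALL
   sentinel downgrades the batch to a full TLB flush */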
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
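/* pages written by the guest are queued here so their handling can be
   deferred; when the queue fills up, the kernel module is notified
   synchronously via KQEMU_MODIFY_RAM_PAGES and the queue is drained */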
/* host memory images of the x86 FSAVE and FXSAVE areas */
struct fpstate {
    uint16_t fpuc, dummy1;
    uint16_t fpus, dummy2;
    uint16_t fptag, dummy3;
    uint32_t fpip, fpcs, fpoo, fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc, fpus, fptag, fop;
    uint32_t fpuip;
    uint16_t cs_sel, dummy0;
    uint32_t fpudp;
    uint16_t ds_sel, dummy1;
    uint32_t mxcsr, mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
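/* fxsave/fxrstor raise #GP on a memory operand that is not 16-byte
   aligned, hence the aligned(16) attribute on the static buffer */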
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* 11b marks the register as empty */
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
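/* 0x037f is the x87 power-on control word: all exceptions masked,
   64-bit precision, round to nearest; only the guest's rounding
   control field (bits 10-11) is carried over */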
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff; /* fxrstor uses 1 = valid, hence the inversion */

    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
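/* SYSCALL dispatch follows the AMD64 convention: STAR[47:32] supplies
   the kernel CS selector (SS is implicitly CS + 8), LSTAR/CSTAR hold
   the 64-bit and compatibility-mode entry points, and FMASK lists the
   RFLAGS bits to clear on entry */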
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
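/* the hash first divides by PC_REC_SIZE (a bucket granule; with
   PC_REC_SIZE == 1 this is a no-op), then XOR-folds the high bits down
   before masking to the table size */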
/* sort in descending count order */
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
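/* each line of /tmp/kqemu.stats is: pc, hit count, share of all
   recorded soft MMU exits, and the cumulative share up to that line */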
#endif /* CONFIG_PROFILER */

int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
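/* KQEMU_EXEC ran guest code natively inside the kernel module until an
   interrupt, exception or soft MMU fault forced an exit; the state it
   returned in kenv is copied back into the CPUState below */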
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef __x86_64__
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
            ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}