/*
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#ifdef __FreeBSD__
#include <sys/ioccom.h>
#endif
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
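
/* The three arrays above are shared with the kqemu kernel module and act
   as batched communication channels: pages_to_flush collects guest TLB
   pages to invalidate, ram_pages_to_update collects pages whose dirty
   state must be resynchronized, and modified_ram_pages collects pages
   whose translated code must be invalidated.  Each queue saturates to an
   "ALL" sentinel once its fixed capacity is exceeded, trading precision
   for a bounded buffer size. */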
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    /* CPUID exists iff bit 21 (ID) of EFLAGS can be toggled */
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for the best performance it is better
       not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}
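
/* Example of the masking above (illustrative values, not from a real
   host): if the guest CPUID would advertise SSE2 but the host CPU does
   not support it, then (features & critical_features_mask) clears
   CPUID_SSE2 from env->cpuid_features, so guest user code never tries
   to execute SSE2 instructions that could not run natively under the
   kqemu monitor. */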
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
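
/* Typical call site (a minimal sketch, not the actual machine start-up
   code, which may differ):

       if (kqemu_init(env) == 0) {
           // env->kqemu_enabled is now set and kqemu_cpu_exec() may be
           // used from the main execution loop
       }

   On failure the function returns -1 and QEMU silently falls back to the
   software emulator. */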
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
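
/* Note the saturation behaviour: once more than KQEMU_MAX_PAGES_TO_FLUSH
   pages have been queued, the counter is replaced by the KQEMU_FLUSH_ALL
   sentinel and the kernel module performs one full TLB flush instead of
   per-page invalidations. */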
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
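
/* modified_ram_pages_table deduplicates the notifications so each page is
   queued at most once between flushes; when the queue fills up, the batch
   is pushed to the kernel module immediately via KQEMU_MODIFY_RAM_PAGES
   instead of waiting for the next KQEMU_EXEC round trip. */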
/* memory image of the hardware FSAVE area (32-bit protected mode) */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

/* memory image of the hardware FXSAVE area */
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
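
/* fxsave/fxrstor fault with #GP unless their 512-byte operand is 16-byte
   aligned, hence the aligned(16) attribute on fpx1; a stack-allocated
   struct fpstate is fine for fsave/frstor, which have no alignment
   requirement. */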
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
            fptag |= 2;
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
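
/* FSAVE stores a full tag word (2 bits per register: 00 valid, 01 zero,
   10 special, 11 empty), which is why the loops above shift by two and
   compare against 3.  FXSAVE/FXRSTOR use an abridged tag of one bit per
   register with the opposite sense (1 = valid), which the functions
   below handle with the fptag ^ 0xff conversion. */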
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
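
/* In both the save and restore paths the j = (j + 1) & 7 rotation maps
   the architectural ST(i) stack view (relative to the top-of-stack index
   fpstt) onto env->fpregs[], which stores the physical register file;
   the FSAVE/FXSAVE memory image is always ordered ST(0)..ST(7). */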
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
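
/* Background on the MSR layout used above: STAR[47:32] holds the kernel
   CS selector loaded by SYSCALL (SS is implicitly CS + 8, hence
   "selector + 8"), LSTAR/CSTAR hold the 64-bit and compatibility-mode
   entry points, and SFMASK (env->fmask) lists the RFLAGS bits cleared on
   entry.  kqemu exits to QEMU with KQEMU_RET_SYSCALL, so this privileged
   transition is emulated here rather than in the kernel module. */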
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
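
/* This profiler counts, per guest PC, how often execution bounced back
   to the software MMU (the KQEMU_RET_SOFTMMU case in kqemu_cpu_exec), so
   the dump highlights the hottest non-accelerated code. */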
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
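
/* The dump is sorted by descending count; an illustrative (made-up)
   excerpt of /tmp/kqemu.stats:

       total: 105
       c0112345: 60 57.14% 57.14%
       c0112360: 45 42.86% 100.00%

   i.e. PC, hit count, share of the total, and cumulative share. */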
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
    memcpy(kenv->regs, env->regs, sizeof(kenv->regs));
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    memcpy(&kenv->segs, &env->segs, sizeof(env->segs));
    memcpy(&kenv->ldt, &env->ldt, sizeof(env->ldt));
    memcpy(&kenv->tr, &env->tr, sizeof(env->tr));
    memcpy(&kenv->gdt, &env->gdt, sizeof(env->gdt));
    memcpy(&kenv->idt, &env->idt, sizeof(env->idt));
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
#if KQEMU_VERSION >= 0x010100
    kenv->efer = env->efer;
#endif
#if KQEMU_VERSION >= 0x010300
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef __x86_64__
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
#endif
    nb_ram_pages_to_update = 0;

#if KQEMU_VERSION >= 0x010300
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;
#endif
    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);
#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
#if KQEMU_VERSION >= 0x010100
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#else
    ret = ioctl(kqemu_fd, KQEMU_EXEC, kenv);
#endif
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
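
/* kqemu executes guest code with the guest FPU image loaded into the
   real FPU, so the restore_native_fp_* call above the KQEMU_EXEC request
   and the save_native_fp_* call below it bracket every round trip;
   fxsave/fxrstor are preferred whenever the CPU advertises FXSR. */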
    memcpy(env->regs, kenv->regs, sizeof(env->regs));
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    memcpy(env->segs, kenv->segs, sizeof(env->segs));
    cpu_x86_set_cpl(env, kenv->cpl);
    memcpy(&env->ldt, &kenv->ldt, sizeof(env->ldt));
#if 0
    /* no need to restore that */
    memcpy(env->tr, kenv->tr, sizeof(env->tr));
    memcpy(env->gdt, kenv->gdt, sizeof(env->gdt));
    memcpy(env->idt, kenv->idt, sizeof(env->idt));
    env->a20_mask = kenv->a20_mask;
#endif
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#if KQEMU_VERSION >= 0x010300
#ifdef __x86_64__
    env->kernelgsbase = kenv->kernelgsbase;
#endif
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

#if KQEMU_VERSION >= 0x010200
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
#endif

#if KQEMU_VERSION >= 0x010300
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
#endif
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef __x86_64__
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
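
/* Summary of the contract with the caller, as reconstructed from the
   dispatch above: the function returns 1 when an interrupt or exception
   must be raised by the generic code, 0 when the host signalled an
   interruption (KQEMU_RET_INTR), 2 when execution must continue in the
   software emulator (the KQEMU_RET_SYSCALL and KQEMU_RET_SOFTMMU paths),
   and it aborts on any unknown return code. */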
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* cancelling the I/O request causes kqemu to finish executing the
       current block and to return successfully */
    CancelIo(kqemu_fd);
#endif
}