/*
 * Copyright (c) 2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>
#include <sys/ioctl.h>
/* compatibility stuff */
#ifndef KQEMU_RET_SYSCALL
#define KQEMU_RET_SYSCALL   0x0300 /* syscall insn */
#endif
#ifndef KQEMU_MAX_RAM_PAGES_TO_UPDATE
#define KQEMU_MAX_RAM_PAGES_TO_UPDATE 512
#define KQEMU_RAM_PAGES_UPDATE_ALL (KQEMU_MAX_RAM_PAGES_TO_UPDATE + 1)
#endif
#ifndef KQEMU_MAX_MODIFIED_RAM_PAGES
#define KQEMU_MAX_MODIFIED_RAM_PAGES 512
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
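/* kqemu_allowed selects whether the accelerator may be used; the arrays
   below are work lists shared with the kqemu kernel module (TLB pages to
   flush, RAM pages whose dirty state changed, RAM pages modified by QEMU). */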
int kqemu_allowed = 1;
unsigned long *pages_to_flush;
unsigned int nb_pages_to_flush;
unsigned long *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
unsigned long *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
extern uint32_t **l1_phys_map;
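/* raw CPUID access, used to probe the host features that kqemu exposes */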
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
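/* Make the guest CPUID feature bits agree with the host for the features
   that matter to user code run natively under kqemu (CMOV, MMX, SSE, ...). */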
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for best performance it is better
       not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
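/* Open the kqemu device, check the module version, allocate the shared
   page lists and pass the guest RAM layout to the module via KQEMU_INIT. */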
int kqemu_init(CPUState *env)
{
    struct kqemu_init init;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE);
        return -1;
    }
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(unsigned long));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(unsigned long));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(unsigned long));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;
    init.ram_base = phys_ram_base;
    init.ram_size = phys_ram_size;
    init.ram_dirty = phys_ram_dirty;
    init.phys_to_ram_map = l1_phys_map;
    init.pages_to_flush = pages_to_flush;
#if KQEMU_VERSION >= 0x010200
    init.ram_pages_to_update = ram_pages_to_update;
#endif
#if KQEMU_VERSION >= 0x010300
    init.modified_ram_pages = modified_ram_pages;
#endif
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &init, sizeof(init),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &init);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    nb_modified_ram_pages = 0;
    return 0;
}
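/* Queue one page for flushing from the kqemu TLB; degrade to a full
   flush when the list overflows. */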
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
#endif
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
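/* Request a full TLB flush on the next KQEMU_EXEC. */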
void kqemu_flush(CPUState *env, int global)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
#endif
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
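/* Queue a RAM page whose dirty-tracking state must be resynchronized with
   the kqemu module; degrade to a full update when the list overflows. */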
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n", ram_addr);
    }
#endif
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
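/* Clear the list and lookup table of RAM pages recorded as modified. */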
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
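/* Record a RAM page modified by QEMU (e.g. by DMA) for the kqemu module;
   the list is pushed with the KQEMU_MODIFY_RAM_PAGES ioctl when it fills up. */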
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
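/* Save areas matching the hardware FSAVE and FXSAVE memory formats, used to
   move the FPU/SSE state between CPUState and the real FPU around KQEMU_EXEC. */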
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
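/* load the guest FPU state into the host FPU using FRSTOR (no FXSR) */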
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
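/* save the host FPU state back into CPUState using FSAVE and reload a
   control word with the default (all exceptions masked) settings */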
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
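/* same as restore_native_fp_frstor but using FXRSTOR, which also loads the
   SSE registers and MXCSR when the guest has SSE */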
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
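/* save the host FPU and SSE state back into CPUState using FXSAVE */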
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
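/* Emulate the SYSCALL instruction that kqemu bounced back to us: load the
   kernel code/stack segments from STAR and jump to the syscall entry point. */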
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef __x86_64__
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (env->hflags & HF_CS64_MASK)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
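/* Optional profiling: histogram of the guest PCs that force a return to the
   QEMU software MMU, dumped to /tmp/kqemu.stats by kqemu_record_dump(). */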
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
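/* Execute guest code inside the kqemu module: marshal the CPU state into a
   kqemu_cpu_state, issue KQEMU_EXEC, copy the state back, then map the
   module's return code onto the values expected by cpu_exec(). */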
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
#endif
->regs
, env
->regs
, sizeof(kenv
->regs
));
646 kenv
->eip
= env
->eip
;
647 kenv
->eflags
= env
->eflags
;
648 memcpy(&kenv
->segs
, &env
->segs
, sizeof(env
->segs
));
649 memcpy(&kenv
->ldt
, &env
->ldt
, sizeof(env
->ldt
));
650 memcpy(&kenv
->tr
, &env
->tr
, sizeof(env
->tr
));
651 memcpy(&kenv
->gdt
, &env
->gdt
, sizeof(env
->gdt
));
652 memcpy(&kenv
->idt
, &env
->idt
, sizeof(env
->idt
));
653 kenv
->cr0
= env
->cr
[0];
654 kenv
->cr2
= env
->cr
[2];
655 kenv
->cr3
= env
->cr
[3];
656 kenv
->cr4
= env
->cr
[4];
657 kenv
->a20_mask
= env
->a20_mask
;
658 #if KQEMU_VERSION >= 0x010100
659 kenv
->efer
= env
->efer
;
661 #if KQEMU_VERSION >= 0x010300
662 kenv
->tsc_offset
= 0;
663 kenv
->star
= env
->star
;
664 kenv
->sysenter_cs
= env
->sysenter_cs
;
665 kenv
->sysenter_esp
= env
->sysenter_esp
;
666 kenv
->sysenter_eip
= env
->sysenter_eip
;
668 kenv
->lstar
= env
->lstar
;
669 kenv
->cstar
= env
->cstar
;
670 kenv
->fmask
= env
->fmask
;
671 kenv
->kernelgsbase
= env
->kernelgsbase
;
674 if (env
->dr
[7] & 0xff) {
675 kenv
->dr7
= env
->dr
[7];
676 kenv
->dr0
= env
->dr
[0];
677 kenv
->dr1
= env
->dr
[1];
678 kenv
->dr2
= env
->dr
[2];
679 kenv
->dr3
= env
->dr
[3];
683 kenv
->dr6
= env
->dr
[6];
684 cpl
= (env
->hflags
& HF_CPL_MASK
);
686 kenv
->nb_pages_to_flush
= nb_pages_to_flush
;
687 #if KQEMU_VERSION >= 0x010200
688 kenv
->user_only
= (env
->kqemu_enabled
== 1);
689 kenv
->nb_ram_pages_to_update
= nb_ram_pages_to_update
;
691 nb_ram_pages_to_update
= 0;
693 #if KQEMU_VERSION >= 0x010300
694 kenv
->nb_modified_ram_pages
= nb_modified_ram_pages
;
696 kqemu_reset_modified_ram_pages();
698 if (env
->cpuid_features
& CPUID_FXSR
)
699 restore_native_fp_fxrstor(env
);
701 restore_native_fp_frstor(env
);
704 if (DeviceIoControl(kqemu_fd
, KQEMU_EXEC
,
705 kenv
, sizeof(struct kqemu_cpu_state
),
706 kenv
, sizeof(struct kqemu_cpu_state
),
713 #if KQEMU_VERSION >= 0x010100
714 ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
717 ret
= ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
720 if (env
->cpuid_features
& CPUID_FXSR
)
721 save_native_fp_fxsave(env
);
723 save_native_fp_fsave(env
);
725 memcpy(env
->regs
, kenv
->regs
, sizeof(env
->regs
));
726 env
->eip
= kenv
->eip
;
727 env
->eflags
= kenv
->eflags
;
728 memcpy(env
->segs
, kenv
->segs
, sizeof(env
->segs
));
729 cpu_x86_set_cpl(env
, kenv
->cpl
);
730 memcpy(&env
->ldt
, &kenv
->ldt
, sizeof(env
->ldt
));
732 /* no need to restore that */
733 memcpy(env
->tr
, kenv
->tr
, sizeof(env
->tr
));
734 memcpy(env
->gdt
, kenv
->gdt
, sizeof(env
->gdt
));
735 memcpy(env
->idt
, kenv
->idt
, sizeof(env
->idt
));
736 env
->a20_mask
= kenv
->a20_mask
;
738 env
->cr
[0] = kenv
->cr0
;
739 env
->cr
[4] = kenv
->cr4
;
740 env
->cr
[3] = kenv
->cr3
;
741 env
->cr
[2] = kenv
->cr2
;
742 env
->dr
[6] = kenv
->dr6
;
743 #if KQEMU_VERSION >= 0x010300
745 env
->kernelgsbase
= kenv
->kernelgsbase
;
749 /* flush pages as indicated by kqemu */
750 if (kenv
->nb_pages_to_flush
>= KQEMU_FLUSH_ALL
) {
753 for(i
= 0; i
< kenv
->nb_pages_to_flush
; i
++) {
754 tlb_flush_page(env
, pages_to_flush
[i
]);
757 nb_pages_to_flush
= 0;
759 #ifdef CONFIG_PROFILER
760 kqemu_time
+= profile_getclock() - ti
;
764 #if KQEMU_VERSION >= 0x010200
765 if (kenv
->nb_ram_pages_to_update
> 0) {
766 cpu_tlb_update_dirty(env
);
770 #if KQEMU_VERSION >= 0x010300
771 if (kenv
->nb_modified_ram_pages
> 0) {
772 for(i
= 0; i
< kenv
->nb_modified_ram_pages
; i
++) {
774 addr
= modified_ram_pages
[i
];
775 tb_invalidate_phys_page_range(addr
, addr
+ TARGET_PAGE_SIZE
, 0);
780 /* restore the hidden flags */
782 unsigned int new_hflags
;
784 if ((env
->hflags
& HF_LMA_MASK
) &&
785 (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
787 new_hflags
= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
791 /* legacy / compatibility case */
792 new_hflags
= (env
->segs
[R_CS
].flags
& DESC_B_MASK
)
793 >> (DESC_B_SHIFT
- HF_CS32_SHIFT
);
794 new_hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
795 >> (DESC_B_SHIFT
- HF_SS32_SHIFT
);
796 if (!(env
->cr
[0] & CR0_PE_MASK
) ||
797 (env
->eflags
& VM_MASK
) ||
798 !(env
->hflags
& HF_CS32_MASK
)) {
799 /* XXX: try to avoid this test. The problem comes from the
800 fact that is real mode or vm86 mode we only modify the
801 'base' and 'selector' fields of the segment cache to go
802 faster. A solution may be to force addseg to one in
804 new_hflags
|= HF_ADDSEG_MASK
;
806 new_hflags
|= ((env
->segs
[R_DS
].base
|
807 env
->segs
[R_ES
].base
|
808 env
->segs
[R_SS
].base
) != 0) <<
812 env
->hflags
= (env
->hflags
&
813 ~(HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
| HF_ADDSEG_MASK
)) |
816 /* update FPU flags */
817 env
->hflags
= (env
->hflags
& ~(HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
)) |
818 ((env
->cr
[0] << (HF_MP_SHIFT
- 1)) & (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
));
819 if (env
->cr
[4] & CR4_OSFXSR_MASK
)
820 env
->hflags
|= HF_OSFXSR_MASK
;
822 env
->hflags
&= ~HF_OSFXSR_MASK
;
#ifdef DEBUG
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }
#endif
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
#ifdef DEBUG
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
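/* Make kqemu stop executing guest code as soon as possible. */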
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32) && KQEMU_VERSION >= 0x010101
    /* Cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}