/*
 * Copyright (c) 2005-2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/ioctl.h>
#endif
#ifdef HOST_SOLARIS
#include <sys/ioccom.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
static void qpi_init(void);
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 1;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
static int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
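
/* The pages_to_flush / ram_pages_to_update / modified_ram_pages
   arrays are shared with the kqemu kernel module (their addresses are
   passed in struct kqemu_init below): QEMU batches page addresses in
   them, and the nb_* counters tell each side how many entries are
   valid. */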
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    /* CPUID is available iff bit 21 (the ID flag) of EFLAGS can be
       toggled */
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
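
/* Make the guest CPUID reflect only those host features that user
   code may rely on directly (see critical_features_mask below), so
   that code running natively under kqemu and code running under the
   emulator see consistent feature bits. */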
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to get the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
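
/* Open the kqemu device, check that the kernel module speaks the same
   interface version (KQEMU_VERSION), hand it the guest RAM layout and
   the shared communication buffers via KQEMU_INIT, and mark the CPU
   as accelerated. */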
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = phys_ram_base;
    kinit.ram_size = phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
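
/* TLB flushes are batched: page addresses accumulate in
   pages_to_flush[] and are handed to the kernel module on the next
   KQEMU_EXEC. When the buffer overflows, the whole TLB is flushed
   instead (KQEMU_FLUSH_ALL). */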
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
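
/* Pages written by the guest while running under kqemu must be
   reported so that QEMU can invalidate any translated code derived
   from them. modified_ram_pages_table[] deduplicates the reports;
   when the buffer fills up, it is flushed to the kernel module
   immediately via KQEMU_MODIFY_RAM_PAGES. */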
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush the buffer to the kernel module */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
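
/* Forward a physical memory mapping change to the kernel module,
   translating QEMU's io_index encoding into the small set of cases
   kqemu understands (RAM, ROM, the QPI communication page, or
   unassigned). */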
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch(io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_MEM error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
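
/* Host FPU/SSE state is saved and restored around each KQEMU_EXEC run
   with fsave/frstor, or fxsave/fxrstor when the CPU supports FXSR.
   The two structures below mirror the hardware-defined memory layouts
   of those instructions. */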
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads *fp, so it is an input of the asm */
    asm volatile ("frstor %0" : : "m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes *fp, so it is an output of the asm */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    /* fxrstor reads *fp, so it is an input of the asm */
    asm volatile ("fxrstor %0" : : "m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    /* fxsave writes *fp, so it is an output of the asm */
    asm volatile ("fxsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
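
/* The kernel module does not emulate the SYSCALL instruction itself;
   it returns KQEMU_RET_SYSCALL and the transition to the kernel entry
   point (STAR/LSTAR/CSTAR) is performed here, following the
   architected SYSCALL semantics. */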
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
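
/* Optional profiling support: count how often each guest PC falls
   back to the software MMU (KQEMU_RET_SOFTMMU) and dump a sorted
   histogram to /tmp/kqemu.stats. */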
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc, r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif

static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
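
/* Run the guest inside kqemu: copy the CPU state into a struct
   kqemu_cpu_state, issue KQEMU_EXEC, then copy the state back and
   translate the module's return code (interrupt, exception, syscall,
   software-MMU fallback) into the value expected by the main
   execution loop. */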
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for(i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for(i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for(i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for(i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   the translator. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and to return successfully. */
    CancelIo(kqemu_fd);
#endif
}

/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running in
   kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000

static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}

/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}

static CPUReadMemoryFunc *qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc *qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};

static void qpi_init(void)
{
    kqemu_comm_base = QPI_COMM_PAGE_PHYS_ADDR | 1;
    qpi_io_memory = cpu_register_io_memory(0,
                                           qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}
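
/* A hedged guest-side sketch (not part of this file): a
   paravirtualized guest that can access the communication page at
   QPI_COMM_PAGE_PHYS_ADDR (assumed identity-mapped here) could read
   and modify IF/IOPL roughly like this:

       volatile uint32_t *qpi = (volatile uint32_t *)0xff000000;
       uint32_t flags = *qpi;              // current IF | IOPL
       *qpi = flags | (1 << 9);            // request IF = 1
       asm volatile ("pushf\n\tpopf");     // leave the current TB

   The pushf/popf pair follows the note above qpi_mem_writel: after a
   write, the guest must make sure it exits the current translated
   block so that the new flags take effect. */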