 * Copyright (c) 2005-2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
#define WIN32_LEAN_AND_MEAN
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/ioccom.h>
#include "qemu-common.h"

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 1;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;

uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
static int is_cpuid_supported(void)

static int is_cpuid_supported(void)

    asm volatile ("pushf\n"
                  "xorl $0x00200000, %0\n"
                  : "=a" (v0), "=d" (v1)
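/*
 * Added note: the two is_cpuid_supported() declarations above are the
 * per-architecture variants (in the original they are selected with an
 * #ifdef; on x86_64 CPUID always exists, so that version can simply
 * return 1).  The 32-bit test works by toggling the ID flag, bit 21
 * (0x00200000) of EFLAGS: that bit can only be changed if the processor
 * supports the CPUID instruction, so the code saves EFLAGS, flips the
 * bit, reloads EFLAGS, reads it back and compares the two values
 * (v0 and v1).
 */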
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target cpus because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performances
       it is better not to use it */
    features &= ~CPUID_SEP;

    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
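    /*
     * Added illustration (not in the original): the masking above keeps
     * the "critical" bits in sync with the host.  For example, if the
     * host CPUID leaf 1 does not report SSE2, CPUID_SSE2 is cleared in
     * env->cpuid_features even though the emulated CPU model might
     * normally advertise it, so guest user code never selects code
     * paths the host cannot run natively while kqemu is active.
     */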
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
#endif
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table = qemu_mallocz(phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = phys_ram_base;
    kinit.ram_size = phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;
    return 0;
}
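/*
 * Added summary (not in the original file): kqemu_init() opens the
 * kqemu character device, verifies that the kernel module reports the
 * same KQEMU_VERSION, allocates the shared page-list buffers
 * (pages_to_flush, ram_pages_to_update, modified_ram_pages and the
 * per-page lookup table), and passes their addresses together with the
 * RAM layout to the module through the KQEMU_INIT ioctl/DeviceIoControl.
 * On any failure the device is closed and kqemu stays disabled.
 */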
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    }
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_flush:\n");
    }
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu_set_notdirty: addr=%08lx\n",
                (unsigned long)ram_addr);
    }
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
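/*
 * Added note: modified-page tracking is batched.  QEMU appends page
 * addresses to modified_ram_pages and, only when the buffer reaches
 * KQEMU_MAX_MODIFIED_RAM_PAGES, pushes the whole list to the kernel
 * module with KQEMU_MODIFY_RAM_PAGES before resetting it, avoiding one
 * ioctl per modified page.
 */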
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;
#ifdef _WIN32
    DWORD temp;
#endif

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;

    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch(io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }

#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                          kphys_mem, sizeof(*kphys_mem),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
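/*
 * Added note: kqemu_set_phys_mem() translates a QEMU physical memory
 * registration into one of the kqemu mapping types: plain RAM, ROM,
 * the QPI communication page (when io_index matches qpi_io_memory), or
 * unassigned for any other MMIO region, and reports the range to the
 * kernel module with KQEMU_SET_PHYS_MEM.
 */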
    uint8_t fpregs1[8 * 10];

    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];

static struct fpxstate fpx1 __attribute__((aligned(16)));
static void restore_native_fp_frstor(CPUState *env)
{
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for (i = 7; i >= 0; i--) {
        if (env->fptags[i]) {

            /* the FPU automatically computes it */

    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);

    asm volatile ("frstor %0" : "=m" (*fp));
static void save_native_fp_fsave(CPUState *env)
{
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);

    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);

    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);

    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);

    asm volatile ("fxrstor %0" : "=m" (*fp));
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;

    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);

    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
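/*
 * Added note: 0x037f is the x87 control word after FNINIT (all
 * exceptions masked, 64-bit precision, round to nearest).  After the
 * guest FPU image has been saved, only the guest's rounding-control
 * bits (bits 10-11 of fpuc) are preserved, so QEMU's own host-side
 * floating point code continues with a sane default configuration.
 */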
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector, code64;

    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
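    /*
     * Added note: do_syscall() emulates the architectural effects of the
     * SYSCALL instruction when the monitor returns KQEMU_RET_SYSCALL:
     * it loads flat CS/SS descriptors derived from MSR_STAR, saves the
     * return address in ECX (and the flags in R11 in long mode), masks
     * EFLAGS and jumps to the LSTAR/CSTAR entry point, or to the 32-bit
     * STAR entry point in legacy mode.
     */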
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
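/*
 * Added note: this CONFIG_PROFILER code keeps a hash table of guest
 * program counters at which kqemu had to bail out to the software MMU
 * (see the KQEMU_RET_SOFTMMU case in kqemu_cpu_exec below).
 * kqemu_record_dump() writes the records to /tmp/kqemu.stats sorted by
 * hit count, so the hottest non-virtualizable spots can be identified.
 */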
static void kqemu_record_pc(unsigned long pc)
{
    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];

    r = malloc(sizeof(PCRecord));
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)

    else if (r1->count == r2->count)
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {

        pc_rec_hash[h] = NULL;
void kqemu_record_dump(void)
{
    pr = malloc(sizeof(PCRecord *) * nb_pc_records);

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {

    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    for(i = 0; i < nb_pc_records; i++) {
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",

                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);

    kqemu_record_flush();
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}
static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: cpu_exec: enter\n");
        cpu_dump_state(env, logfile, fprintf, 0);
    }
    for(i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for(i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),

    ioctl(kqemu_fd, KQEMU_EXEC, kenv);

    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for(i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for(i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;
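    /*
     * Added note: the round trip above marshals the whole visible CPU
     * state (general registers, segment caches, control and debug
     * registers, MSRs) into struct kqemu_cpu_state, loads the guest FPU
     * image into the host FPU, runs the guest through KQEMU_EXEC, and
     * then copies the state back.  The TLB is resynchronized from the
     * module's pages_to_flush list: KQEMU_FLUSH_ALL requests a full
     * flush, otherwise only the listed pages are invalidated.
     */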
#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
    /* restore the hidden flags */
    {
        unsigned int new_hflags;

        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translated code. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    }

    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    }
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                    env->exception_index);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        if (loglevel & CPU_LOG_INT) {
            fprintf(logfile, "kqemu: exception v=%02x e=%04x:\n",
                    env->exception_index, env->error_code);
            cpu_dump_state(env, logfile, fprintf, 0);
        }
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        if (loglevel & CPU_LOG_INT) {
            cpu_dump_state(env, logfile, fprintf, 0);
        }
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
    }
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and successfully returning. */
    CancelIo(kqemu_fd);
#endif
}
/*
   QEMU paravirtualization interface. The current interface only
   allows to modify the IF and IOPL flags when running in

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}
/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}
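/*
 * Added illustration (not part of the original file): a QPI-aware guest
 * kernel could manipulate its IF/IOPL bits through the communication
 * page instead of executing privileged instructions, roughly like this
 * (guest-side pseudo-code, assuming the page at QPI_COMM_PAGE_PHYS_ADDR
 * has been mapped by the guest):
 *
 *     volatile uint32_t *qpi = map_phys(QPI_COMM_PAGE_PHYS_ADDR);
 *     uint32_t flags = *qpi;          // qpi_mem_readl: current IF | IOPL
 *     *qpi = flags & ~IF_MASK;        // qpi_mem_writel: e.g. emulate "cli"
 *     asm volatile ("pushf; popf");   // leave the current TB, see note above
 */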
static CPUReadMemoryFunc *qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc *qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};

static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(0,
                                           qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}
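/*
 * Added note: qpi_init() registers the byte/word/long handlers above as
 * an I/O memory region and maps a single 4 KB page of it at
 * kqemu_comm_base & ~0xfff (i.e. QPI_COMM_PAGE_PHYS_ADDR).
 * kqemu_set_phys_mem() recognizes qpi_io_memory and reports that page
 * to the kernel module as KQEMU_IO_MEM_COMM.
 */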