/*
 * Copyright (c) 2005-2008 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef HOST_SOLARIS
#include <sys/ioccom.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#ifdef DEBUG
#define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
#define LOG_INT(...) do { } while (0)
#define LOG_INT_STATE(env) do { } while (0)
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
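
/* On Windows the accelerator is reached through a named device object
   opened with CreateFile() and driven with DeviceIoControl(); on POSIX
   hosts it is a character device driven with ioctl(). The
   KQEMU_INVALID_FD and kqemu_closefd() definitions above hide that
   difference from the rest of the file. */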
/* 0 = kqemu disabled, 1 = kqemu enabled for user code only,
   2 = kqemu enabled for user and kernel code */
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
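
/* is_cpuid_supported() relies on the classic EFLAGS.ID (bit 21) probe:
   on CPUs that implement CPUID, software can toggle that bit, so reading
   back a changed value proves the instruction exists. On x86_64 CPUID is
   architectural, hence the unconditional variant. A minimal, illustrative
   use of the cpuid() macro above (not part of the original file) is
   sketched below. */
#if 0
static void example_print_vendor(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[13];

    if (!is_cpuid_supported())
        return;
    cpuid(0, eax, ebx, ecx, edx);   /* leaf 0: vendor string */
    memcpy(vendor, &ebx, 4);        /* the 12 bytes come back in */
    memcpy(vendor + 4, &edx, 4);    /* EBX, EDX, ECX order */
    memcpy(vendor + 8, &ecx, 4);
    vendor[12] = '\0';
    printf("CPU vendor: %s\n", vendor);
}
#endif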
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;
    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to get the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;
#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }
    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        qemu_mallocz(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;
    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
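
/* Page flush requests are batched in pages_to_flush[] and handed to the
   kernel module on the next KQEMU_EXEC; once the buffer would overflow,
   the count degrades to KQEMU_FLUSH_ALL and a full TLB flush is done
   instead. */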
void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
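
/* QEMU-side writes to guest RAM (DMA and similar) are recorded here so
   the kernel module can update its view of those pages. The list is
   flushed eagerly with KQEMU_MODIFY_RAM_PAGES once it fills up, and is
   otherwise passed along with the next KQEMU_EXEC. */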
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch(io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
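
/* In this memory API the low bits of phys_offset encode the I/O index
   (IO_MEM_RAM, IO_MEM_ROM, registered handlers, ...), which is why it is
   split with TARGET_PAGE_MASK above before being translated into the
   KQEMU_IO_MEM_* constants the kernel module understands. */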
/* memory image stored/loaded by the fsave/frstor instructions */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

/* memory image stored/loaded by the fxsave/fxrstor instructions */
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
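
/* fxsave/fxrstor fault unless their 512-byte operand is 16-byte aligned,
   hence the aligned attribute on fpx1; plain fsave/frstor have no such
   requirement, so struct fpstate can live on the stack. */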
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
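
/* Worked example of the tag packing above: the loop emits two bits per
   register, 11 for an empty slot and 00 for an occupied one (the FPU
   recomputes the exact valid/zero/special class itself on frstor). All
   registers empty therefore yields fptag == 0xffff; only register 0
   occupied (env->fptags[0] == 0) yields 0xfffc. */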
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
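
/* Unlike fsave, fxsave keeps an abridged 8-bit tag: one bit per register,
   set when the register is occupied. env->fptags[] stores 1 for "empty",
   so the bitmask built above is inverted with ^ 0xff before being stored
   in fp->fptag (and inverted back in save_native_fp_fxsave below). */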
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
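
/* This mirrors the hardware SYSCALL semantics: the new CS selector comes
   from STAR[47:32] with SS at CS + 8, the entry point from LSTAR (64-bit
   callers) or CSTAR (compatibility mode), and RFLAGS bits are cleared
   according to FMASK; in legacy mode the entry point is STAR[31:0] and
   only IF/RF/VM are cleared. The returned 2 is treated like the
   KQEMU_RET_SOFTMMU case, i.e. execution resumes in the emulator. */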
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc, r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
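
/* Each line of /tmp/kqemu.stats gives a guest PC, its hit count, its
   share of all recorded hits, and the cumulative share up to that line;
   records are sorted by descending count, so the first few lines point
   at the hottest spots that fell back to the software MMU. */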
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for(i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for(i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();
    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);
    for(i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for(i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif
    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif
    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }
    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   the translator. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
            ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;
    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    }
    if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
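
/* Summary of the dispatch above: 0 means kqemu was interrupted (let the
   main loop run), 1 means an interrupt or exception is now pending in
   env, and 2 (also returned by do_syscall()) means the guest must be
   resumed with the software MMU / translator. */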
void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and to return successfully. */
    CancelIo(kqemu_fd);
#endif
}
/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running inside
   kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/
#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
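
/* A hypothetical guest-side view of the interface (illustrative sketch,
   not part of QEMU): a paravirtualized kernel that mapped the QPI page
   could read and write the 32-bit word at QPI_COMM_PAGE_PHYS_ADDR to
   query or set IF/IOPL instead of trapping on cli/sti, then execute
   pushf/popf to leave the current translation block as noted below. */
#if 0
static inline uint32_t qpi_guest_get_iflags(volatile uint32_t *qpi_page)
{
    return *qpi_page;            /* eflags & (IF_MASK | IOPL_MASK) */
}

static inline void qpi_guest_set_iflags(volatile uint32_t *qpi_page,
                                        uint32_t val)
{
    *qpi_page = val;             /* updates IF and IOPL */
}
#endif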
static uint32_t qpi_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}
static uint32_t qpi_mem_readl(void *opaque, target_phys_addr_t addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}
/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}
static CPUReadMemoryFunc *qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc *qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};
static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(
                                           qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}