/*
 * Copyright (c) 2005-2008 Fabrice Bellard
 * Copyright (c) 2011 Stefan Weil
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include <sys/ioctl.h>
#ifdef CONFIG_SOLARIS
#include <sys/ioccom.h>
#endif
#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
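
/*
 * Batching protocol implied by the arrays above (my reading of the code
 * below, not a documented spec): QEMU appends page addresses to
 * pages_to_flush / ram_pages_to_update / modified_ram_pages between
 * KQEMU_EXEC runs, and the counters are handed to the kernel module in
 * struct kqemu_cpu_state.  When a buffer would overflow, the counter is
 * replaced by a sentinel (KQEMU_FLUSH_ALL, KQEMU_RAM_PAGES_UPDATE_ALL)
 * telling the module to act on everything instead.
 */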
#define cpuid(index, eax, ebx, ecx, edx) \
    asm volatile ("cpuid" \
                  : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                  : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    /* toggle the ID bit (bit 21) in EFLAGS: if the change sticks, the
       CPUID instruction is available */
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so in order to have the best performance
       it is better not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non accelerated code sees exactly the same CPU features as the
       accelerated code */
}
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        g_malloc0(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
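
/*
 * Expected call order, as far as it can be inferred from this file: the
 * machine init code allocates guest RAM and sets kqemu_phys_ram_base /
 * kqemu_phys_ram_size before calling kqemu_init(env); on success the
 * batching buffers are registered with the kernel module and qpi_init()
 * has mapped the QPI communication page.
 */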
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for (i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
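
/*
 * modified_ram_pages_table de-duplicates notifications: one byte per
 * guest RAM page, set the first time the page is reported.  Unlike the
 * flush buffers, which ride along with the next KQEMU_EXEC call, a full
 * batch is pushed to the kernel module immediately via
 * KQEMU_MODIFY_RAM_PAGES and then cleared.
 */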
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch (io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n", ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
/* in-memory image written/read by fsave/frstor (legacy 32-bit layout) */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

/* in-memory image written/read by fxsave/fxrstor (512-byte layout) */
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
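
/* fxsave/fxrstor require a 16-byte aligned memory operand (the CPU
   raises #GP otherwise); fpx1 is static and explicitly aligned so the
   inline asm below cannot fault on that. */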
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : : "m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : : "m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}
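
/*
 * do_syscall() below mirrors the architectural SYSCALL entry: CS/SS are
 * derived from STAR[47:32], the return EIP is saved in ECX (plus EFLAGS
 * in R11 in long mode), EFLAGS is masked (with FMASK in long mode), and
 * execution resumes at LSTAR (64-bit code), CSTAR (compatibility mode)
 * or STAR[31:0] (legacy mode).
 */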
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for (;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
/* sort in descending count order */
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for (i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    /* copy the guest state into the kernel module's structure */
    for (i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for (i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    /* copy the guest state back from the kernel module's structure */
    for (i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for (i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for (i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for (i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
void kqemu_cpu_interrupt(CPUState *env)
{
#ifdef _WIN32
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}
/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running in
   kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
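
/*
 * Hypothetical guest-side use of the QPI page (an illustration, not a
 * documented ABI; the mapping step and variable names are assumptions).
 * A guest that maps the 4 KiB page at QPI_COMM_PAGE_PHYS_ADDR can read
 * and set its virtual IF/IOPL with plain 32-bit accesses:
 *
 *     volatile uint32_t *qpi = qpi_page_mapping;   // assumed mapping
 *     uint32_t flags = *qpi;          // routed to qpi_mem_readl()
 *     *qpi = flags | (1 << 9);        // set IF; routed to qpi_mem_writel()
 *     // then execute pushf/popf so the current TB is exited (see the
 *     // note above qpi_mem_writel below)
 *
 * Only 32-bit accesses carry data: the byte/word handlers below are
 * stubs that ignore writes and read as zero.
 */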
static uint32_t qpi_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, hwaddr addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}
/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}
static CPUReadMemoryFunc * const qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc * const qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};

static void qpi_init(void)
{
    kqemu_comm_base = QPI_COMM_PAGE_PHYS_ADDR | 1;
    qpi_io_memory = cpu_register_io_memory(qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}
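
/*
 * qpi_init() registers the communication page as MMIO: any guest access
 * to the 4 KiB page at QPI_COMM_PAGE_PHYS_ADDR is routed to the
 * qpi_mem_* handlers above, and kqemu_set_phys_mem() reports that slot
 * to the kernel module as KQEMU_IO_MEM_COMM (see the qpi_io_memory
 * comparison there).  The low bit set in kqemu_comm_base is, as far as
 * I can tell, a "valid" marker for the module rather than part of the
 * address; that reading is a guess.
 */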