/*
 * Copyright (c) 2005-2008 Fabrice Bellard
 * Copyright (c) 2011 Stefan Weil
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef CONFIG_SOLARIS
#include <sys/ioccom.h>
#endif
#ifdef DEBUG
#  define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#  define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
#  define LOG_INT(...) do { } while (0)
#  define LOG_INT_STATE(env) do { } while (0)
#endif
#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif
static void qpi_init(void);
#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
/* CPUID is supported iff the EFLAGS.ID bit (0x00200000) can be toggled */
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif
static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* The following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for best performance it is better not
       to use it. */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code. */
}
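
/* Worked sketch of the masking above (illustrative, not from the original
   source): if the host reports CPUID_SSE2 but the guest CPU model does not,
   the combination
       (env->cpuid_features & ~critical_features_mask) |
       (features & critical_features_mask)
   forces the guest-visible SSE2 bit to follow the host, because CPUID_SSE2
   is inside critical_features_mask; bits outside the mask stay whatever the
   guest CPU model chose. */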
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        g_malloc0(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}
void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}
static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}
void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch(io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64
                " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}
/* memory image of the 32-bit FSAVE area */
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;
    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

/* memory image of the FXSAVE area */
struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

/* fxsave/fxrstor require a 16-byte aligned operand */
static struct fpxstate fpx1 __attribute__((aligned(16)));
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
            fptag |= 2;
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    /* FXRSTOR uses the abridged tag format: one bit per register, set
       if the register is valid; env->fptags uses 1 for an empty
       register, hence the final XOR. */
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}
static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}
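
/* For reference (an x86 architectural fact, not specific to this file): the
   STAR MSR packs the SYSCALL/SYSRET selector bases, which is why the code
   above takes CS from STAR[47:32] and derives SS as that selector + 8:

       STAR[31:0]   legacy-mode SYSCALL target EIP (the (uint32_t)env->star)
       STAR[47:32]  kernel CS selector loaded on SYSCALL; SS is implied at +8
       STAR[63:48]  selector base used by SYSRET

   LSTAR and CSTAR hold the 64-bit and compatibility-mode entry points used
   in the long-mode branch. */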
#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;
static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}
/* sort by descending count */
static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}
static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}
void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif
static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for(i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for(i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for(i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for(i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
            ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}
void kqemu_cpu_interrupt(CPUState *env)
{
#ifdef _WIN32
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}
/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running in
   kqemu mode.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
static uint32_t qpi_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, hwaddr addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}

/* Note: after writing to this address, the guest code must make sure
   it exits the current TB; pushf/popf can be used for that purpose. */
static void qpi_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}
* const qpi_mem_read
[3] = {
977 static CPUWriteMemoryFunc
* const qpi_mem_write
[3] = {
static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(
                                           qpi_mem_read,
                                           qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}