4 * Copyright (c) 2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <sys/types.h>
27 #include <sys/ioctl.h>
46 #include "kqemu/kqemu.h"
/*
 * Compatibility macros and the handle/fd to the kqemu kernel module device.
 * NOTE(review): the duplicated KQEMU_DEVICE / KQEMU_INVALID_FD / kqemu_fd /
 * kqemu_closefd definitions below indicate lost #ifdef _WIN32 / #else /
 * #endif directives in this excerpt -- code left byte-identical, comments
 * only. First variant set is Win32 (HANDLE/CloseHandle), second is POSIX
 * (int fd/close).
 */
48 /* compatibility stuff */
49 #ifndef KQEMU_RET_SYSCALL
/* fallback definition for older kqemu/kqemu.h headers lacking this code */
50 #define KQEMU_RET_SYSCALL 0x0300 /* syscall insn */
/* Win32 device path (\\.\kqemu) */
54 #define KQEMU_DEVICE "\\\\.\\kqemu"
/* POSIX device node */
56 #define KQEMU_DEVICE "/dev/kqemu"
60 #define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
61 HANDLE kqemu_fd
= KQEMU_INVALID_FD
;
62 #define kqemu_closefd(x) CloseHandle(x)
64 #define KQEMU_INVALID_FD -1
65 int kqemu_fd
= KQEMU_INVALID_FD
;
66 #define kqemu_closefd(x) close(x)
/* User-settable switch enabling kqemu acceleration; on by default. */
69 int kqemu_allowed
= 1;
/* Array of guest page addresses queued for TLB flush; allocated in
   kqemu_init() and handed to the kernel module (see init.pages_to_flush). */
70 unsigned long *pages_to_flush
;
/* Number of valid entries in pages_to_flush, or the KQEMU_FLUSH_ALL
   sentinel when a full flush was requested (see kqemu_flush_page/kqemu_flush). */
71 unsigned int nb_pages_to_flush
;
/* Physical page map defined elsewhere in QEMU; passed to the module as
   init.phys_to_ram_map in kqemu_init(). */
72 extern uint32_t **l1_phys_map
;
/*
 * cpuid helper macro: executes the CPUID instruction for 'index' and
 * stores EAX/EBX/ECX/EDX into the given lvalues.
 * NOTE(review): the input-operand line of the asm statement is missing
 * from this excerpt; code left byte-identical.
 */
74 #define cpuid(index, eax, ebx, ecx, edx) \
75 asm volatile ("cpuid" \
76 : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
/*
 * is_cpuid_supported: reports whether the CPUID instruction is available.
 * NOTE(review): two declarations appear below -- presumably an #ifdef
 * (x86_64 trivially-true variant vs. 32-bit EFLAGS test) whose directives
 * were lost in this excerpt. The visible test toggles the EFLAGS.ID bit
 * (mask 0x00200000): if the bit can be flipped, CPUID is supported.
 */
80 static int is_cpuid_supported(void)
85 static int is_cpuid_supported(void)
88 asm volatile ("pushf\n"
91 "xorl $0x00200000, %0\n"
96 : "=a" (v0
), "=d" (v1
)
/*
 * kqemu_update_cpuid: make selected guest CPUID feature bits match the
 * host CPU. Since kqemu runs guest user code natively, features that user
 * code probes via CPUID (CMOV, CX8, FXSR, MMX, SSE, ...) must reflect what
 * the host actually executes.
 * NOTE(review): this excerpt is missing interleaved lines (braces, the
 * is_cpuid_supported() early-return body, the assignment of 'features'
 * from the cpuid result) -- code left byte-identical, comments only.
 */
103 static void kqemu_update_cpuid(CPUState
*env
)
105 int critical_features_mask
, features
;
106 uint32_t eax
, ebx
, ecx
, edx
;
108 /* the following features are kept identical on the host and
109 target cpus because they are important for user code. Strictly
110 speaking, only SSE really matters because the OS must support
111 it if the user code uses it. */
112 critical_features_mask
=
113 CPUID_CMOV
| CPUID_CX8
|
114 CPUID_FXSR
| CPUID_MMX
| CPUID_SSE
|
/* if CPUID itself is unavailable, the host feature set cannot be read */
116 if (!is_cpuid_supported()) {
/* leaf 1: standard feature flags (EDX carries the bits masked above) */
119 cpuid(1, eax
, ebx
, ecx
, edx
);
/* splice host bits into the guest feature word: keep non-critical guest
   bits, take critical bits from the host ('features'). */
122 env
->cpuid_features
= (env
->cpuid_features
& ~critical_features_mask
) |
123 (features
& critical_features_mask
);
124 /* XXX: we could update more of the target CPUID state so that the
125 non accelerated code sees exactly the same CPU features as the
/*
 * kqemu_init: open the kqemu kernel module device, verify its version
 * against KQEMU_VERSION, allocate the pages_to_flush array, describe guest
 * RAM to the module via KQEMU_INIT, then enable acceleration on 'env'
 * (kqemu_update_cpuid + env->kqemu_enabled = 1).
 * Returns 0 on success; on any failure the device is closed (or never
 * opened) and acceleration stays off.
 * NOTE(review): this excerpt is missing interleaved lines (the
 * #ifdef _WIN32 / #else directives separating the CreateFile and open()
 * paths, the kqemu_allowed guard, early returns, braces) -- code left
 * byte-identical, comments only.
 */
129 int kqemu_init(CPUState
*env
)
131 struct kqemu_init init
;
/* Win32 path: open the \\.\kqemu device */
141 kqemu_fd
= CreateFile(KQEMU_DEVICE
, GENERIC_WRITE
| GENERIC_READ
,
142 FILE_SHARE_READ
| FILE_SHARE_WRITE
,
143 NULL
, OPEN_EXISTING
, FILE_ATTRIBUTE_NORMAL
,
/* POSIX path: open /dev/kqemu */
146 kqemu_fd
= open(KQEMU_DEVICE
, O_RDWR
);
148 if (kqemu_fd
== KQEMU_INVALID_FD
) {
149 fprintf(stderr
, "Could not open '%s' - QEMU acceleration layer not activated\n", KQEMU_DEVICE
);
/* query the module's version (ioctl on POSIX, DeviceIoControl on Win32) */
154 DeviceIoControl(kqemu_fd
, KQEMU_GET_VERSION
, NULL
, 0,
155 &version
, sizeof(version
), &temp
, NULL
);
157 ioctl(kqemu_fd
, KQEMU_GET_VERSION
, &version
);
/* refuse to run against a mismatched kernel module */
159 if (version
!= KQEMU_VERSION
) {
160 fprintf(stderr
, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
161 version
, KQEMU_VERSION
);
/* shared flush-request array handed to the module below */
165 pages_to_flush
= qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH
*
166 sizeof(unsigned long));
/* describe guest RAM and the shared structures to the kernel module */
170 init
.ram_base
= phys_ram_base
;
171 init
.ram_size
= phys_ram_size
;
172 init
.ram_dirty
= phys_ram_dirty
;
173 init
.phys_to_ram_map
= l1_phys_map
;
174 init
.pages_to_flush
= pages_to_flush
;
/* KQEMU_INIT: Win32 variant maps DeviceIoControl's BOOL onto 0/-1 */
176 ret
= DeviceIoControl(kqemu_fd
, KQEMU_INIT
, &init
, sizeof(init
),
177 NULL
, 0, &temp
, NULL
) == TRUE
? 0 : -1;
179 ret
= ioctl(kqemu_fd
, KQEMU_INIT
, &init
);
/* failure: report, close the device and mark the fd invalid */
182 fprintf(stderr
, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret
);
184 kqemu_closefd(kqemu_fd
);
185 kqemu_fd
= KQEMU_INVALID_FD
;
/* success: align guest CPUID with the host and enable acceleration */
188 kqemu_update_cpuid(env
);
189 env
->kqemu_enabled
= 1;
190 nb_pages_to_flush
= 0;
/*
 * kqemu_flush_page: queue a single guest page address so the kqemu kernel
 * module flushes it from its shadow TLB on the next KQEMU_EXEC (see
 * kenv->nb_pages_to_flush handoff elsewhere in this file). When the
 * fixed-size array is full, degrade to a full flush via the
 * KQEMU_FLUSH_ALL sentinel.
 * NOTE(review): braces and an apparent 'else' line are missing from this
 * excerpt -- code left byte-identical, comments only.
 */
194 void kqemu_flush_page(CPUState
*env
, target_ulong addr
)
197 if (loglevel
& CPU_LOG_INT
) {
198 fprintf(logfile
, "kqemu_flush_page: addr=" TARGET_FMT_lx
"\n", addr
);
/* saturate: too many individual pages -> request a full TLB flush */
201 if (nb_pages_to_flush
>= KQEMU_MAX_PAGES_TO_FLUSH
)
202 nb_pages_to_flush
= KQEMU_FLUSH_ALL
;
204 pages_to_flush
[nb_pages_to_flush
++] = addr
;
/*
 * kqemu_flush: request a full TLB flush from the kqemu kernel module by
 * setting the KQEMU_FLUSH_ALL sentinel; picked up on the next KQEMU_EXEC.
 * NOTE(review): the 'global' parameter is not used in the visible lines --
 * presumably ignored (full flush either way); braces are missing from this
 * excerpt -- code left byte-identical, comments only.
 */
207 void kqemu_flush(CPUState
*env
, int global
)
210 if (loglevel
& CPU_LOG_INT
) {
211 fprintf(logfile
, "kqemu_flush:\n");
214 nb_pages_to_flush
= KQEMU_FLUSH_ALL
;
/*
 * NOTE(review): field fragments from two FPU state layouts whose struct
 * headers are missing from this excerpt: presumably 'struct fpstate'
 * (FSAVE image: 8 x 10-byte ST registers) and 'struct fpxstate' (FXSAVE
 * image: 8 x 16-byte ST slots plus 16 x 16-byte XMM registers) -- confirm
 * against the full file. Code left byte-identical.
 */
229 uint8_t fpregs1
[8 * 10];
245 uint8_t fpregs1
[8 * 16];
246 uint8_t xmm_regs
[16 * 16];
/* static FXSAVE buffer; fxsave/fxrstor require a 16-byte-aligned operand */
250 static struct fpxstate fpx1
__attribute__((aligned(16)));
/*
 * restore_native_fp_frstor: load the guest FPU state from 'env' into the
 * host FPU using the legacy FRSTOR instruction (no-FXSR hosts). Builds an
 * FSAVE-format image on the stack: control word, status word with the
 * top-of-stack (fpstt) re-inserted into bits 11-13, tag word, then the
 * eight 10-byte ST registers.
 * NOTE(review): this excerpt is missing interleaved lines (the fptag
 * accumulation inside the first loop, the computation of 'j', braces) --
 * code left byte-identical, comments only.
 */
252 static void restore_native_fp_frstor(CPUState
*env
)
255 struct fpstate fp1
, *fp
= &fp1
;
257 fp
->fpuc
= env
->fpuc
;
/* re-insert fpstt (top of stack) into FSW bits 11-13 */
258 fp
->fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
260 for (i
=7; i
>=0; i
--) {
262 if (env
->fptags
[i
]) {
265 /* the FPU automatically computes it */
/* copy the eight 80-bit (10-byte) ST registers into the FSAVE image */
270 for(i
= 0;i
< 8; i
++) {
271 memcpy(&fp
->fpregs1
[i
* 10], &env
->fpregs
[j
].d
, 10);
274 asm volatile ("frstor %0" : "=m" (*fp
));
/*
 * save_native_fp_fsave: dump the host FPU state with the legacy FSAVE
 * instruction and unpack it into 'env' (no-FXSR hosts): control word,
 * top-of-stack extracted from FSW bits 11-13, status word with those bits
 * cleared, per-register tags, and the eight 10-byte ST registers.
 * Finally reloads a default control word (0x037f, preserving the guest's
 * rounding-control bits 10-11) so QEMU's own FP code runs with known
 * rounding. FSAVE itself reinitializes the FPU, hence the explicit fldcw.
 * NOTE(review): this excerpt is missing interleaved lines (the fptag
 * extraction/shift inside the first loop, computation of 'j', braces) --
 * code left byte-identical, comments only.
 */
277 static void save_native_fp_fsave(CPUState
*env
)
281 struct fpstate fp1
, *fp
= &fp1
;
283 asm volatile ("fsave %0" : : "m" (*fp
));
284 env
->fpuc
= fp
->fpuc
;
/* FSW bits 11-13 hold the top-of-stack index */
285 env
->fpstt
= (fp
->fpus
>> 11) & 7;
286 env
->fpus
= fp
->fpus
& ~0x3800;
288 for(i
= 0;i
< 8; i
++) {
/* tag value 3 == empty register in the FSAVE tag word encoding */
289 env
->fptags
[i
] = ((fptag
& 3) == 3);
/* copy the eight 80-bit ST registers back into env */
293 for(i
= 0;i
< 8; i
++) {
294 memcpy(&env
->fpregs
[j
].d
, &fp
->fpregs1
[i
* 10], 10);
297 /* we must restore the default rounding state */
298 fpuc
= 0x037f | (env
->fpuc
& (3 << 10));
299 asm volatile("fldcw %0" : : "m" (fpuc
));
/*
 * restore_native_fp_fxrstor: load guest FPU/SSE state into the host using
 * FXRSTOR (FXSR-capable hosts). Builds an FXSAVE-format image in the
 * static, 16-byte-aligned fpx1 buffer: control/status words (fpstt folded
 * into FSW bits 11-13), the one-byte abridged tag (1 = valid, so the
 * accumulated empty-bits are inverted with ^ 0xff), the eight ST registers
 * in 16-byte slots, and -- when the guest advertises SSE -- MXCSR, its
 * mask, and the XMM register file.
 * NOTE(review): this excerpt is missing interleaved lines (fptag
 * initialization, computation of 'j', braces) -- code left byte-identical,
 * comments only.
 */
302 static void restore_native_fp_fxrstor(CPUState
*env
)
304 struct fpxstate
*fp
= &fpx1
;
307 fp
->fpuc
= env
->fpuc
;
308 fp
->fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
/* gather per-register empty flags, then invert: fxsave tag has 1 = valid */
310 for(i
= 0; i
< 8; i
++)
311 fptag
|= (env
->fptags
[i
] << i
);
312 fp
->fptag
= fptag
^ 0xff;
/* ST registers occupy 16-byte slots in the fxsave image; only 10 bytes
   carry data */
315 for(i
= 0;i
< 8; i
++) {
316 memcpy(&fp
->fpregs1
[i
* 16], &env
->fpregs
[j
].d
, 10);
319 if (env
->cpuid_features
& CPUID_SSE
) {
320 fp
->mxcsr
= env
->mxcsr
;
321 /* XXX: check if DAZ is not available */
322 fp
->mxcsr_mask
= 0xffff;
323 memcpy(fp
->xmm_regs
, env
->xmm_regs
, CPU_NB_REGS
* 16);
325 asm volatile ("fxrstor %0" : "=m" (*fp
));
/*
 * save_native_fp_fxsave: dump host FPU/SSE state with FXSAVE into the
 * static fpx1 buffer and unpack into 'env' (FXSR-capable hosts): control
 * word, top-of-stack from FSW bits 11-13, status word with those bits
 * cleared, per-register tags recovered from the abridged one-byte tag
 * (inverted back with ^ 0xff; bit set = empty), the eight ST registers
 * from their 16-byte slots, and -- when the guest advertises SSE -- MXCSR
 * and the XMM register file. Ends with fninit + fldcw to leave the host
 * FPU in a default state (0x037f) that keeps the guest's rounding-control
 * bits 10-11, for QEMU's own FP code.
 * NOTE(review): this excerpt is missing interleaved lines (computation of
 * 'j', braces) -- code left byte-identical, comments only.
 */
328 static void save_native_fp_fxsave(CPUState
*env
)
330 struct fpxstate
*fp
= &fpx1
;
334 asm volatile ("fxsave %0" : : "m" (*fp
));
335 env
->fpuc
= fp
->fpuc
;
336 env
->fpstt
= (fp
->fpus
>> 11) & 7;
337 env
->fpus
= fp
->fpus
& ~0x3800;
/* invert abridged tag back: after ^ 0xff a set bit means "empty" */
338 fptag
= fp
->fptag
^ 0xff;
339 for(i
= 0;i
< 8; i
++) {
340 env
->fptags
[i
] = (fptag
>> i
) & 1;
/* ST registers sit in 16-byte fxsave slots; only 10 bytes carry data */
343 for(i
= 0;i
< 8; i
++) {
344 memcpy(&env
->fpregs
[j
].d
, &fp
->fpregs1
[i
* 16], 10);
347 if (env
->cpuid_features
& CPUID_SSE
) {
348 env
->mxcsr
= fp
->mxcsr
;
349 memcpy(env
->xmm_regs
, fp
->xmm_regs
, CPU_NB_REGS
* 16);
352 /* we must restore the default rounding state */
353 asm volatile ("fninit");
354 fpuc
= 0x037f | (env
->fpuc
& (3 << 10));
355 asm volatile("fldcw %0" : : "m" (fpuc
));
/*
 * do_syscall: emulate the x86 SYSCALL instruction after the kqemu module
 * returned KQEMU_RET_SYSCALL. The target CS selector comes from bits
 * 32-47 of the STAR MSR. Two paths:
 *  - long mode (HF_LMA_MASK): save next_eip in RCX and RFLAGS in R11,
 *    switch to CPL 0, load 64-bit CS (DESC_L_MASK) and SS, mask RFLAGS
 *    with SFMASK (env->fmask), and jump to LSTAR (64-bit CS) or CSTAR
 *    (compatibility mode);
 *  - legacy mode: save the 32-bit next_eip in ECX, switch to CPL 0, load
 *    32-bit CS/SS, clear IF/RF/VM, and jump to the low 32 bits of STAR.
 * NOTE(review): this excerpt is missing interleaved lines (the #ifdef
 * TARGET_X86_64 bracketing suggested by the two paths, the segment
 * base/limit arguments of cpu_x86_load_seg_cache, the else branches, the
 * return statement) -- code left byte-identical, comments only.
 */
358 static int do_syscall(CPUState
*env
,
359 struct kqemu_cpu_state
*kenv
)
/* STAR[47:32] = kernel CS selector base for SYSCALL */
363 selector
= (env
->star
>> 32) & 0xffff;
365 if (env
->hflags
& HF_LMA_MASK
) {
/* long mode: RCX <- return RIP, R11 <- RFLAGS (regs[11]) */
366 env
->regs
[R_ECX
] = kenv
->next_eip
;
367 env
->regs
[11] = env
->eflags
;
369 cpu_x86_set_cpl(env
, 0);
370 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
372 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
374 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
/* SS selector is STAR CS selector + 8, per the SYSCALL convention */
375 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
377 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
379 DESC_W_MASK
| DESC_A_MASK
);
/* mask RFLAGS with SFMASK */
380 env
->eflags
&= ~env
->fmask
;
381 if (env
->hflags
& HF_CS64_MASK
)
382 env
->eip
= env
->lstar
;
384 env
->eip
= env
->cstar
;
/* legacy (non-LMA) path: 32-bit SYSCALL */
388 env
->regs
[R_ECX
] = (uint32_t)kenv
->next_eip
;
390 cpu_x86_set_cpl(env
, 0);
391 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
393 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
395 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
396 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
398 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
400 DESC_W_MASK
| DESC_A_MASK
);
401 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
/* entry point is the low 32 bits of STAR */
402 env
->eip
= (uint32_t)env
->star
;
407 int kqemu_cpu_exec(CPUState
*env
)
409 struct kqemu_cpu_state kcpu_state
, *kenv
= &kcpu_state
;
416 if (loglevel
& CPU_LOG_INT
) {
417 fprintf(logfile
, "kqemu: cpu_exec: enter\n");
418 cpu_dump_state(env
, logfile
, fprintf
, 0);
421 memcpy(kenv
->regs
, env
->regs
, sizeof(kenv
->regs
));
422 kenv
->eip
= env
->eip
;
423 kenv
->eflags
= env
->eflags
;
424 memcpy(&kenv
->segs
, &env
->segs
, sizeof(env
->segs
));
425 memcpy(&kenv
->ldt
, &env
->ldt
, sizeof(env
->ldt
));
426 memcpy(&kenv
->tr
, &env
->tr
, sizeof(env
->tr
));
427 memcpy(&kenv
->gdt
, &env
->gdt
, sizeof(env
->gdt
));
428 memcpy(&kenv
->idt
, &env
->idt
, sizeof(env
->idt
));
429 kenv
->cr0
= env
->cr
[0];
430 kenv
->cr2
= env
->cr
[2];
431 kenv
->cr3
= env
->cr
[3];
432 kenv
->cr4
= env
->cr
[4];
433 kenv
->a20_mask
= env
->a20_mask
;
434 #if KQEMU_VERSION >= 0x010100
435 kenv
->efer
= env
->efer
;
437 if (env
->dr
[7] & 0xff) {
438 kenv
->dr7
= env
->dr
[7];
439 kenv
->dr0
= env
->dr
[0];
440 kenv
->dr1
= env
->dr
[1];
441 kenv
->dr2
= env
->dr
[2];
442 kenv
->dr3
= env
->dr
[3];
446 kenv
->dr6
= env
->dr
[6];
448 kenv
->nb_pages_to_flush
= nb_pages_to_flush
;
449 nb_pages_to_flush
= 0;
451 if (!(kenv
->cr0
& CR0_TS_MASK
)) {
452 if (env
->cpuid_features
& CPUID_FXSR
)
453 restore_native_fp_fxrstor(env
);
455 restore_native_fp_frstor(env
);
459 DeviceIoControl(kqemu_fd
, KQEMU_EXEC
,
460 kenv
, sizeof(struct kqemu_cpu_state
),
461 kenv
, sizeof(struct kqemu_cpu_state
),
465 #if KQEMU_VERSION >= 0x010100
466 ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
469 ret
= ioctl(kqemu_fd
, KQEMU_EXEC
, kenv
);
472 if (!(kenv
->cr0
& CR0_TS_MASK
)) {
473 if (env
->cpuid_features
& CPUID_FXSR
)
474 save_native_fp_fxsave(env
);
476 save_native_fp_fsave(env
);
479 memcpy(env
->regs
, kenv
->regs
, sizeof(env
->regs
));
480 env
->eip
= kenv
->eip
;
481 env
->eflags
= kenv
->eflags
;
482 memcpy(env
->segs
, kenv
->segs
, sizeof(env
->segs
));
484 /* no need to restore that */
485 memcpy(env
->ldt
, kenv
->ldt
, sizeof(env
->ldt
));
486 memcpy(env
->tr
, kenv
->tr
, sizeof(env
->tr
));
487 memcpy(env
->gdt
, kenv
->gdt
, sizeof(env
->gdt
));
488 memcpy(env
->idt
, kenv
->idt
, sizeof(env
->idt
));
489 env
->cr
[0] = kenv
->cr0
;
490 env
->cr
[3] = kenv
->cr3
;
491 env
->cr
[4] = kenv
->cr4
;
492 env
->a20_mask
= kenv
->a20_mask
;
494 env
->cr
[2] = kenv
->cr2
;
495 env
->dr
[6] = kenv
->dr6
;
498 if (loglevel
& CPU_LOG_INT
) {
499 fprintf(logfile
, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret
);
502 if (ret
== KQEMU_RET_SYSCALL
) {
503 /* syscall instruction */
504 return do_syscall(env
, kenv
);
506 if ((ret
& 0xff00) == KQEMU_RET_INT
) {
507 env
->exception_index
= ret
& 0xff;
509 env
->exception_is_int
= 1;
510 env
->exception_next_eip
= kenv
->next_eip
;
512 if (loglevel
& CPU_LOG_INT
) {
513 fprintf(logfile
, "kqemu: interrupt v=%02x:\n",
514 env
->exception_index
);
515 cpu_dump_state(env
, logfile
, fprintf
, 0);
519 } else if ((ret
& 0xff00) == KQEMU_RET_EXCEPTION
) {
520 env
->exception_index
= ret
& 0xff;
521 env
->error_code
= kenv
->error_code
;
522 env
->exception_is_int
= 0;
523 env
->exception_next_eip
= 0;
525 if (loglevel
& CPU_LOG_INT
) {
526 fprintf(logfile
, "kqemu: exception v=%02x e=%04x:\n",
527 env
->exception_index
, env
->error_code
);
528 cpu_dump_state(env
, logfile
, fprintf
, 0);
532 } else if (ret
== KQEMU_RET_INTR
) {
534 if (loglevel
& CPU_LOG_INT
) {
535 cpu_dump_state(env
, logfile
, fprintf
, 0);
539 } else if (ret
== KQEMU_RET_SOFTMMU
) {
542 cpu_dump_state(env
, stderr
, fprintf
, 0);
543 fprintf(stderr
, "Unsupported return value: 0x%x\n", ret
);