/*
 * KQEMU support
 *
 * Copyright (c) 2005-2008 Fabrice Bellard
 * Copyright (c) 2011 Stefan Weil
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef CONFIG_SOLARIS
#include <sys/ioccom.h>
#endif

#include "cpu.h"
#include "exec-all.h"

#ifdef CONFIG_KQEMU

#define DEBUG
//#define PROFILE

#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif

#include "kqemu.h"

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif

/* 0 = not allowed
   1 = user kqemu
   2 = kernel kqemu
*/
int kqemu_allowed = 0;
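/* A hedged note: in the QEMU releases that shipped kqemu, this flag was
   typically set from the command line (the -no-kqemu and -kernel-kqemu
   switches); the exact option names vary between releases, so treat
   them as an assumption here. */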
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;

#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))

#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
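/* On 32-bit hosts, CPUID availability must be probed: the ID flag
   (bit 21, mask 0x00200000) of EFLAGS can only be toggled if the
   processor supports the CPUID instruction. */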
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* The following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for best performance it is better not
       to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}

int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        g_malloc0(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the paddings to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}

void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    /* KQEMU_FLUSH_ALL acts as a sentinel: once the buffer overflows,
       request a full TLB flush instead of tracking individual pages */
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}

void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for (i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}

void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}

void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch (io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_MEM error=%d: start_addr=0x%016" PRIx64 " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}

struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
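/* Note: fxsave/fxrstor require their 512-byte operand to be 16-byte
   aligned, hence the aligned(16) attribute on fpx1 above. */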

static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the frame, so pass it as an input operand */
    asm volatile ("frstor %0" : : "m" (*fp));
}

static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the frame, so it is an output operand */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    /* fxrstor reads the frame, so pass it as an input operand */
    asm volatile ("fxrstor %0" : : "m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    /* fxsave writes the frame, so it is an output operand */
    asm volatile ("fxsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile ("fldcw %0" : : "m" (fpuc));
}

static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
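    /* A hedged note: returning 2 matches the KQEMU_RET_SOFTMMU case in
       kqemu_cpu_exec() below and is assumed to ask the caller to resume
       execution with the dynamic translator. */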
    return 2;
}

#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for (;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for (h = 0; h < PC_REC_HASH_SIZE; h++) {
        for (r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for (i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif

static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}

int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for (i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for (i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);

#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for (i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for (i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for (i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }

    if (kenv->nb_modified_ram_pages > 0) {
        for (i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        LOG_INT_STATE(env);
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        LOG_INT_STATE(env);
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
    /* Cancelling the I/O request causes KQEMU to finish executing the
       current block and return successfully. */
    CancelIo(kqemu_fd);
#endif
}

/*
   QEMU paravirtualization interface. The current interface only
   allows the IF and IOPL flags to be modified when running in
   kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/
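
/* A minimal guest-side usage sketch. Assumption: map_qpi_page() is a
 * hypothetical guest helper that maps the communication page whose
 * physical address is QPI_COMM_PAGE_PHYS_ADDR below.
 *
 *     volatile uint32_t *qpi = map_qpi_page();
 *     uint32_t flags = *qpi;            // read the current IF | IOPL bits
 *     *qpi = flags | IF_MASK;           // request IF to be set
 *     // then execute pushf/popf so the current TB is exited
 *     // (see the note before qpi_mem_writel below)
 */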

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000

static uint32_t qpi_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, hwaddr addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}

/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}

static CPUReadMemoryFunc * const qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc * const qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};

static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(
        qpi_mem_read,
        qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}
#endif