/*
 * KQEMU support
 *
 * Copyright (c) 2005-2008 Fabrice Bellard
 * Copyright (c) 2011 Stefan Weil
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "qemu-common.h"
#ifdef _WIN32
#include <windows.h>
#include <winioctl.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#endif
#ifdef CONFIG_SOLARIS
#include <sys/ioccom.h>
#endif

#include "cpu.h"
#include "exec-all.h"

#ifdef CONFIG_KQEMU

#define DEBUG
//#define PROFILE

#ifdef DEBUG
# define LOG_INT(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
# define LOG_INT_STATE(env) log_cpu_state_mask(CPU_LOG_INT, (env), 0)
#else
# define LOG_INT(...) do { } while (0)
# define LOG_INT_STATE(env) do { } while (0)
#endif

#include <unistd.h>
#include <fcntl.h>
#include "kqemu.h"

#ifdef _WIN32
#define KQEMU_DEVICE "\\\\.\\kqemu"
#else
#define KQEMU_DEVICE "/dev/kqemu"
#endif

static void qpi_init(void);

#ifdef _WIN32
#define KQEMU_INVALID_FD INVALID_HANDLE_VALUE
HANDLE kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) CloseHandle(x)
#else
#define KQEMU_INVALID_FD -1
int kqemu_fd = KQEMU_INVALID_FD;
#define kqemu_closefd(x) close(x)
#endif

/* 0 = not allowed
   1 = user kqemu
   2 = kernel kqemu
*/
int kqemu_allowed = 0;
uint64_t *pages_to_flush;
unsigned int nb_pages_to_flush;
uint64_t *ram_pages_to_update;
unsigned int nb_ram_pages_to_update;
uint64_t *modified_ram_pages;
unsigned int nb_modified_ram_pages;
uint8_t *modified_ram_pages_table;
int qpi_io_memory;
uint32_t kqemu_comm_base; /* physical address of the QPI communication page */
ram_addr_t kqemu_phys_ram_size;
uint8_t *kqemu_phys_ram_base;
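
/* Note: kqemu_allowed is copied into env->kqemu_enabled. With user
   kqemu (1) only CPL 3 guest code is run through the kernel module
   (see the user_only flag in kqemu_cpu_exec); kernel kqemu (2) also
   executes guest kernel code natively. */
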
#define cpuid(index, eax, ebx, ecx, edx) \
  asm volatile ("cpuid" \
                : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) \
                : "0" (index))
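
/* On i386, CPUID availability must be detected at run time: the check
   below toggles the ID bit (bit 21) of EFLAGS and tests whether the
   change sticks, which is the architecturally defined way to probe for
   CPUID. On x86_64 the instruction is always present. */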
#ifdef __x86_64__
static int is_cpuid_supported(void)
{
    return 1;
}
#else
static int is_cpuid_supported(void)
{
    int v0, v1;
    asm volatile ("pushf\n"
                  "popl %0\n"
                  "movl %0, %1\n"
                  "xorl $0x00200000, %0\n"
                  "pushl %0\n"
                  "popf\n"
                  "pushf\n"
                  "popl %0\n"
                  : "=a" (v0), "=d" (v1)
                  :
                  : "cc");
    return (v0 != v1);
}
#endif

static void kqemu_update_cpuid(CPUState *env)
{
    int critical_features_mask, features, ext_features, ext_features_mask;
    uint32_t eax, ebx, ecx, edx;

    /* the following features are kept identical on the host and
       target CPUs because they are important for user code. Strictly
       speaking, only SSE really matters because the OS must support
       it if the user code uses it. */
    critical_features_mask =
        CPUID_CMOV | CPUID_CX8 |
        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
        CPUID_SSE2 | CPUID_SEP;
    ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
    if (!is_cpuid_supported()) {
        features = 0;
        ext_features = 0;
    } else {
        cpuid(1, eax, ebx, ecx, edx);
        features = edx;
        ext_features = ecx;
    }
#ifdef __x86_64__
    /* NOTE: on x86_64 CPUs, SYSENTER is not supported in
       compatibility mode, so for the best performance it is better
       not to use it */
    features &= ~CPUID_SEP;
#endif
    env->cpuid_features = (env->cpuid_features & ~critical_features_mask) |
        (features & critical_features_mask);
    env->cpuid_ext_features = (env->cpuid_ext_features & ~ext_features_mask) |
        (ext_features & ext_features_mask);
    /* XXX: we could update more of the target CPUID state so that the
       non-accelerated code sees exactly the same CPU features as the
       accelerated code */
}
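
/* Illustrative example (not in the original sources): if the host CPU
   reports SSE2 but the configured guest CPU model does not, the masking
   in kqemu_update_cpuid() turns CPUID_SSE2 on in env->cpuid_features,
   so guest user code sees the same SSE level whether a block runs under
   kqemu or under the software emulator. */
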
int kqemu_init(CPUState *env)
{
    struct kqemu_init kinit;
    int ret, version;
#ifdef _WIN32
    DWORD temp;
#endif

    if (!kqemu_allowed)
        return -1;

#ifdef _WIN32
    kqemu_fd = CreateFile(KQEMU_DEVICE, GENERIC_WRITE | GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
                          NULL);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %lu\n",
                KQEMU_DEVICE, GetLastError());
        return -1;
    }
#else
    kqemu_fd = open(KQEMU_DEVICE, O_RDWR);
    if (kqemu_fd == KQEMU_INVALID_FD) {
        fprintf(stderr, "Could not open '%s' - QEMU acceleration layer not activated: %s\n",
                KQEMU_DEVICE, strerror(errno));
        return -1;
    }
#endif
    version = 0;
#ifdef _WIN32
    DeviceIoControl(kqemu_fd, KQEMU_GET_VERSION, NULL, 0,
                    &version, sizeof(version), &temp, NULL);
#else
    ioctl(kqemu_fd, KQEMU_GET_VERSION, &version);
#endif
    if (version != KQEMU_VERSION) {
        fprintf(stderr, "Version mismatch between kqemu module and qemu (%08x %08x) - disabling kqemu use\n",
                version, KQEMU_VERSION);
        goto fail;
    }

    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                  sizeof(uint64_t));
    if (!pages_to_flush)
        goto fail;

    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                       sizeof(uint64_t));
    if (!ram_pages_to_update)
        goto fail;

    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                      sizeof(uint64_t));
    if (!modified_ram_pages)
        goto fail;
    modified_ram_pages_table =
        g_malloc0(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
    if (!modified_ram_pages_table)
        goto fail;

    memset(&kinit, 0, sizeof(kinit)); /* set the padding to zero */
    kinit.ram_base = kqemu_phys_ram_base;
    kinit.ram_size = kqemu_phys_ram_size;
    kinit.ram_dirty = phys_ram_dirty;
    kinit.pages_to_flush = pages_to_flush;
    kinit.ram_pages_to_update = ram_pages_to_update;
    kinit.modified_ram_pages = modified_ram_pages;
#ifdef _WIN32
    ret = DeviceIoControl(kqemu_fd, KQEMU_INIT, &kinit, sizeof(kinit),
                          NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
#else
    ret = ioctl(kqemu_fd, KQEMU_INIT, &kinit);
#endif
    if (ret < 0) {
        fprintf(stderr, "Error %d while initializing QEMU acceleration layer - disabling it for now\n", ret);
    fail:
        kqemu_closefd(kqemu_fd);
        kqemu_fd = KQEMU_INVALID_FD;
        return -1;
    }
    kqemu_update_cpuid(env);
    env->kqemu_enabled = kqemu_allowed;
    nb_pages_to_flush = 0;
    nb_ram_pages_to_update = 0;

    qpi_init();
    return 0;
}
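
/* Note: kqemu_init() expects kqemu_phys_ram_base and kqemu_phys_ram_size
   to describe the already-allocated guest RAM, since both values are
   handed to the kernel module through the KQEMU_INIT ioctl. */
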
void kqemu_flush_page(CPUState *env, target_ulong addr)
{
    LOG_INT("kqemu_flush_page: addr=" TARGET_FMT_lx "\n", addr);
    if (nb_pages_to_flush >= KQEMU_MAX_PAGES_TO_FLUSH)
        nb_pages_to_flush = KQEMU_FLUSH_ALL;
    else
        pages_to_flush[nb_pages_to_flush++] = addr;
}

void kqemu_flush(CPUState *env, int global)
{
    LOG_INT("kqemu_flush:\n");
    nb_pages_to_flush = KQEMU_FLUSH_ALL;
}
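
/* TLB flushes are batched: page addresses accumulate in pages_to_flush[]
   and are passed to the kernel module with the next KQEMU_EXEC ioctl;
   when the buffer would overflow, the KQEMU_FLUSH_ALL sentinel requests
   a full TLB flush instead. */
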
void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr)
{
    LOG_INT("kqemu_set_notdirty: addr=%08lx\n",
            (unsigned long)ram_addr);
    /* we only track transitions to dirty state */
    if (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] != 0xff)
        return;
    if (nb_ram_pages_to_update >= KQEMU_MAX_RAM_PAGES_TO_UPDATE)
        nb_ram_pages_to_update = KQEMU_RAM_PAGES_UPDATE_ALL;
    else
        ram_pages_to_update[nb_ram_pages_to_update++] = ram_addr;
}

static void kqemu_reset_modified_ram_pages(void)
{
    int i;
    unsigned long page_index;

    for(i = 0; i < nb_modified_ram_pages; i++) {
        page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
        modified_ram_pages_table[page_index] = 0;
    }
    nb_modified_ram_pages = 0;
}

void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr)
{
    unsigned long page_index;
    int ret;
#ifdef _WIN32
    DWORD temp;
#endif

    page_index = ram_addr >> TARGET_PAGE_BITS;
    if (!modified_ram_pages_table[page_index]) {
#if 0
        printf("%d: modify_page=%08lx\n", nb_modified_ram_pages, ram_addr);
#endif
        modified_ram_pages_table[page_index] = 1;
        modified_ram_pages[nb_modified_ram_pages++] = ram_addr;
        if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
            /* flush */
#ifdef _WIN32
            ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                                  &nb_modified_ram_pages,
                                  sizeof(nb_modified_ram_pages),
                                  NULL, 0, &temp, NULL);
#else
            ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                        &nb_modified_ram_pages);
#endif
            kqemu_reset_modified_ram_pages();
        }
    }
}
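
/* RAM modifications are batched the same way as TLB flushes; the
   explicit KQEMU_MODIFY_RAM_PAGES ioctl above is only needed when
   modified_ram_pages[] fills up before the next KQEMU_EXEC. */
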
void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
                        ram_addr_t phys_offset)
{
    struct kqemu_phys_mem kphys_mem1, *kphys_mem = &kphys_mem1;
    uint64_t end;
    int ret, io_index;

    end = (start_addr + size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    start_addr &= TARGET_PAGE_MASK;
    kphys_mem->phys_addr = start_addr;
    kphys_mem->size = end - start_addr;
    kphys_mem->ram_addr = phys_offset & TARGET_PAGE_MASK;
    io_index = phys_offset & ~TARGET_PAGE_MASK;
    switch(io_index) {
    case IO_MEM_RAM:
        kphys_mem->io_index = KQEMU_IO_MEM_RAM;
        break;
    case IO_MEM_ROM:
        kphys_mem->io_index = KQEMU_IO_MEM_ROM;
        break;
    default:
        if (qpi_io_memory == io_index) {
            kphys_mem->io_index = KQEMU_IO_MEM_COMM;
        } else {
            kphys_mem->io_index = KQEMU_IO_MEM_UNASSIGNED;
        }
        break;
    }
#ifdef _WIN32
    {
        DWORD temp;
        ret = DeviceIoControl(kqemu_fd, KQEMU_SET_PHYS_MEM,
                              kphys_mem, sizeof(*kphys_mem),
                              NULL, 0, &temp, NULL) == TRUE ? 0 : -1;
    }
#else
    ret = ioctl(kqemu_fd, KQEMU_SET_PHYS_MEM, kphys_mem);
#endif
    if (ret < 0) {
        fprintf(stderr, "kqemu: KQEMU_SET_PHYS_PAGE error=%d: start_addr=0x%016" PRIx64 " size=0x%08lx phys_offset=0x%08lx\n",
                ret, start_addr,
                (unsigned long)size, (unsigned long)phys_offset);
    }
}

struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

struct fpxstate {
    uint16_t fpuc;
    uint16_t fpus;
    uint16_t fptag;
    uint16_t fop;
    uint32_t fpuip;
    uint16_t cs_sel;
    uint16_t dummy0;
    uint32_t fpudp;
    uint16_t ds_sel;
    uint16_t dummy1;
    uint32_t mxcsr;
    uint32_t mxcsr_mask;
    uint8_t fpregs1[8 * 16];
    uint8_t xmm_regs[16 * 16];
    uint8_t dummy2[96];
};

static struct fpxstate fpx1 __attribute__((aligned(16)));
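
/* fxsave/fxrstor require a 16-byte aligned operand, hence the alignment
   attribute on fpx1; fsave/frstor have no such requirement, so the
   smaller fpstate image can live on the stack. */
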
static void restore_native_fp_frstor(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
}
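
/* The fsave/frstor image uses two tag bits per x87 register (11 =
   empty), while env->fptags[] and the fxsave image use a single
   empty/valid bit per register; the tag loops in these four helpers
   convert between the two encodings. */
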
static void save_native_fp_fsave(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}

static void restore_native_fp_fxrstor(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int i, j, fptag;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++)
        fptag |= (env->fptags[i] << i);
    fp->fptag = fptag ^ 0xff;

    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 16], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        fp->mxcsr = env->mxcsr;
        /* XXX: check if DAZ is not available */
        fp->mxcsr_mask = 0xffff;
        memcpy(fp->xmm_regs, env->xmm_regs, CPU_NB_REGS * 16);
    }
    asm volatile ("fxrstor %0" : "=m" (*fp));
}

static void save_native_fp_fxsave(CPUState *env)
{
    struct fpxstate *fp = &fpx1;
    int fptag, i, j;
    uint16_t fpuc;

    asm volatile ("fxsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag ^ 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 16], 10);
        j = (j + 1) & 7;
    }
    if (env->cpuid_features & CPUID_SSE) {
        env->mxcsr = fp->mxcsr;
        memcpy(env->xmm_regs, fp->xmm_regs, CPU_NB_REGS * 16);
    }

    /* we must restore the default rounding state */
    asm volatile ("fninit");
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
}
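
/* do_syscall() reproduces the effect of the SYSCALL instruction after
   the kernel module returns KQEMU_RET_SYSCALL: CS and SS are loaded from
   the selector pair held in the STAR MSR, and the new EIP comes from
   LSTAR/CSTAR in long mode or from the low 32 bits of STAR in legacy
   mode. */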
static int do_syscall(CPUState *env,
                      struct kqemu_cpu_state *kenv)
{
    int selector;

    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = kenv->next_eip;
        env->regs[11] = env->eflags;

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        env->regs[R_ECX] = (uint32_t)kenv->next_eip;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
    return 2;
}

#ifdef CONFIG_PROFILER

#define PC_REC_SIZE 1
#define PC_REC_HASH_BITS 16
#define PC_REC_HASH_SIZE (1 << PC_REC_HASH_BITS)

typedef struct PCRecord {
    unsigned long pc;
    int64_t count;
    struct PCRecord *next;
} PCRecord;

static PCRecord *pc_rec_hash[PC_REC_HASH_SIZE];
static int nb_pc_records;

static void kqemu_record_pc(unsigned long pc)
{
    unsigned long h;
    PCRecord **pr, *r;

    h = pc / PC_REC_SIZE;
    h = h ^ (h >> PC_REC_HASH_BITS);
    h &= (PC_REC_HASH_SIZE - 1);
    pr = &pc_rec_hash[h];
    for(;;) {
        r = *pr;
        if (r == NULL)
            break;
        if (r->pc == pc) {
            r->count++;
            return;
        }
        pr = &r->next;
    }
    r = malloc(sizeof(PCRecord));
    r->count = 1;
    r->pc = pc;
    r->next = NULL;
    *pr = r;
    nb_pc_records++;
}

static int pc_rec_cmp(const void *p1, const void *p2)
{
    PCRecord *r1 = *(PCRecord **)p1;
    PCRecord *r2 = *(PCRecord **)p2;
    if (r1->count < r2->count)
        return 1;
    else if (r1->count == r2->count)
        return 0;
    else
        return -1;
}

static void kqemu_record_flush(void)
{
    PCRecord *r, *r_next;
    int h;

    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r_next) {
            r_next = r->next;
            free(r);
        }
        pc_rec_hash[h] = NULL;
    }
    nb_pc_records = 0;
}

void kqemu_record_dump(void)
{
    PCRecord **pr, *r;
    int i, h;
    FILE *f;
    int64_t total, sum;

    pr = malloc(sizeof(PCRecord *) * nb_pc_records);
    i = 0;
    total = 0;
    for(h = 0; h < PC_REC_HASH_SIZE; h++) {
        for(r = pc_rec_hash[h]; r != NULL; r = r->next) {
            pr[i++] = r;
            total += r->count;
        }
    }
    qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);

    f = fopen("/tmp/kqemu.stats", "w");
    if (!f) {
        perror("/tmp/kqemu.stats");
        exit(1);
    }
    fprintf(f, "total: %" PRId64 "\n", total);
    sum = 0;
    for(i = 0; i < nb_pc_records; i++) {
        r = pr[i];
        sum += r->count;
        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
                r->pc,
                r->count,
                (double)r->count / (double)total * 100.0,
                (double)sum / (double)total * 100.0);
    }
    fclose(f);
    free(pr);

    kqemu_record_flush();
}
#endif

static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
                                  const SegmentCache *sc)
{
    ksc->selector = sc->selector;
    ksc->flags = sc->flags;
    ksc->limit = sc->limit;
    ksc->base = sc->base;
}

static inline void kqemu_save_seg(SegmentCache *sc,
                                  const struct kqemu_segment_cache *ksc)
{
    sc->selector = ksc->selector;
    sc->flags = ksc->flags;
    sc->limit = ksc->limit;
    sc->base = ksc->base;
}
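
/* Run one chunk of guest code through the kqemu kernel module.
   Return value: 0 = interrupted by the host, 1 = an exception or
   interrupt is pending in env, 2 = resume execution with the software
   translator (softmmu fault or emulated syscall). */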
int kqemu_cpu_exec(CPUState *env)
{
    struct kqemu_cpu_state kcpu_state, *kenv = &kcpu_state;
    int ret, cpl, i;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif
#ifdef _WIN32
    DWORD temp;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    LOG_INT("kqemu: cpu_exec: enter\n");
    LOG_INT_STATE(env);
    for(i = 0; i < CPU_NB_REGS; i++)
        kenv->regs[i] = env->regs[i];
    kenv->eip = env->eip;
    kenv->eflags = env->eflags;
    for(i = 0; i < 6; i++)
        kqemu_load_seg(&kenv->segs[i], &env->segs[i]);
    kqemu_load_seg(&kenv->ldt, &env->ldt);
    kqemu_load_seg(&kenv->tr, &env->tr);
    kqemu_load_seg(&kenv->gdt, &env->gdt);
    kqemu_load_seg(&kenv->idt, &env->idt);
    kenv->cr0 = env->cr[0];
    kenv->cr2 = env->cr[2];
    kenv->cr3 = env->cr[3];
    kenv->cr4 = env->cr[4];
    kenv->a20_mask = env->a20_mask;
    kenv->efer = env->efer;
    kenv->tsc_offset = 0;
    kenv->star = env->star;
    kenv->sysenter_cs = env->sysenter_cs;
    kenv->sysenter_esp = env->sysenter_esp;
    kenv->sysenter_eip = env->sysenter_eip;
#ifdef TARGET_X86_64
    kenv->lstar = env->lstar;
    kenv->cstar = env->cstar;
    kenv->fmask = env->fmask;
    kenv->kernelgsbase = env->kernelgsbase;
#endif
    if (env->dr[7] & 0xff) {
        kenv->dr7 = env->dr[7];
        kenv->dr0 = env->dr[0];
        kenv->dr1 = env->dr[1];
        kenv->dr2 = env->dr[2];
        kenv->dr3 = env->dr[3];
    } else {
        kenv->dr7 = 0;
    }
    kenv->dr6 = env->dr[6];
    cpl = (env->hflags & HF_CPL_MASK);
    kenv->cpl = cpl;
    kenv->nb_pages_to_flush = nb_pages_to_flush;
    kenv->user_only = (env->kqemu_enabled == 1);
    kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
    nb_ram_pages_to_update = 0;
    kenv->nb_modified_ram_pages = nb_modified_ram_pages;

    kqemu_reset_modified_ram_pages();

    if (env->cpuid_features & CPUID_FXSR)
        restore_native_fp_fxrstor(env);
    else
        restore_native_fp_frstor(env);
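
    /* The guest FPU/SSE state is loaded into the host FPU here because
       the kernel module runs guest code directly on the CPU; it is saved
       back right after the KQEMU_EXEC ioctl below. */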
#ifdef _WIN32
    if (DeviceIoControl(kqemu_fd, KQEMU_EXEC,
                        kenv, sizeof(struct kqemu_cpu_state),
                        kenv, sizeof(struct kqemu_cpu_state),
                        &temp, NULL)) {
        ret = kenv->retval;
    } else {
        ret = -1;
    }
#else
    ioctl(kqemu_fd, KQEMU_EXEC, kenv);
    ret = kenv->retval;
#endif
    if (env->cpuid_features & CPUID_FXSR)
        save_native_fp_fxsave(env);
    else
        save_native_fp_fsave(env);

    for(i = 0; i < CPU_NB_REGS; i++)
        env->regs[i] = kenv->regs[i];
    env->eip = kenv->eip;
    env->eflags = kenv->eflags;
    for(i = 0; i < 6; i++)
        kqemu_save_seg(&env->segs[i], &kenv->segs[i]);
    cpu_x86_set_cpl(env, kenv->cpl);
    kqemu_save_seg(&env->ldt, &kenv->ldt);
    env->cr[0] = kenv->cr0;
    env->cr[4] = kenv->cr4;
    env->cr[3] = kenv->cr3;
    env->cr[2] = kenv->cr2;
    env->dr[6] = kenv->dr6;
#ifdef TARGET_X86_64
    env->kernelgsbase = kenv->kernelgsbase;
#endif

    /* flush pages as indicated by kqemu */
    if (kenv->nb_pages_to_flush >= KQEMU_FLUSH_ALL) {
        tlb_flush(env, 1);
    } else {
        for(i = 0; i < kenv->nb_pages_to_flush; i++) {
            tlb_flush_page(env, pages_to_flush[i]);
        }
    }
    nb_pages_to_flush = 0;

#ifdef CONFIG_PROFILER
    kqemu_time += profile_getclock() - ti;
    kqemu_exec_count++;
#endif

    if (kenv->nb_ram_pages_to_update > 0) {
        cpu_tlb_update_dirty(env);
    }
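
    /* RAM pages reported back as modified may contain translated code;
       any TBs overlapping them must be invalidated so that the software
       emulator does not run stale translations. */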
    if (kenv->nb_modified_ram_pages > 0) {
        for(i = 0; i < kenv->nb_modified_ram_pages; i++) {
            unsigned long addr;
            addr = modified_ram_pages[i];
            tb_invalidate_phys_page_range(addr, addr + TARGET_PAGE_SIZE, 0);
        }
    }

    /* restore the hidden flags */
    {
        unsigned int new_hflags;
#ifdef TARGET_X86_64
        if ((env->hflags & HF_LMA_MASK) &&
            (env->segs[R_CS].flags & DESC_L_MASK)) {
            /* long mode */
            new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
        } else
#endif
        {
            /* legacy / compatibility case */
            new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
            new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
            if (!(env->cr[0] & CR0_PE_MASK) ||
                (env->eflags & VM_MASK) ||
                !(env->hflags & HF_CS32_MASK)) {
                /* XXX: try to avoid this test. The problem comes from the
                   fact that in real mode or vm86 mode we only modify the
                   'base' and 'selector' fields of the segment cache to go
                   faster. A solution may be to force addseg to one in
                   translate-i386.c. */
                new_hflags |= HF_ADDSEG_MASK;
            } else {
                new_hflags |= ((env->segs[R_DS].base |
                                env->segs[R_ES].base |
                                env->segs[R_SS].base) != 0) <<
                    HF_ADDSEG_SHIFT;
            }
        }
        env->hflags = (env->hflags &
                       ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
            new_hflags;
    }
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((env->cr[0] << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
    if (env->cr[4] & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    LOG_INT("kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
    if (ret == KQEMU_RET_SYSCALL) {
        /* syscall instruction */
        return do_syscall(env, kenv);
    } else if ((ret & 0xff00) == KQEMU_RET_INT) {
        env->exception_index = ret & 0xff;
        env->error_code = 0;
        env->exception_is_int = 1;
        env->exception_next_eip = kenv->next_eip;
#ifdef CONFIG_PROFILER
        kqemu_ret_int_count++;
#endif
        LOG_INT("kqemu: interrupt v=%02x:\n", env->exception_index);
        LOG_INT_STATE(env);
        return 1;
    } else if ((ret & 0xff00) == KQEMU_RET_EXCEPTION) {
        env->exception_index = ret & 0xff;
        env->error_code = kenv->error_code;
        env->exception_is_int = 0;
        env->exception_next_eip = 0;
#ifdef CONFIG_PROFILER
        kqemu_ret_excp_count++;
#endif
        LOG_INT("kqemu: exception v=%02x e=%04x:\n",
                env->exception_index, env->error_code);
        LOG_INT_STATE(env);
        return 1;
    } else if (ret == KQEMU_RET_INTR) {
#ifdef CONFIG_PROFILER
        kqemu_ret_intr_count++;
#endif
        LOG_INT_STATE(env);
        return 0;
    } else if (ret == KQEMU_RET_SOFTMMU) {
#ifdef CONFIG_PROFILER
        {
            unsigned long pc = env->eip + env->segs[R_CS].base;
            kqemu_record_pc(pc);
        }
#endif
        LOG_INT_STATE(env);
        return 2;
    } else {
        cpu_dump_state(env, stderr, fprintf, 0);
        fprintf(stderr, "Unsupported return value: 0x%x\n", ret);
        exit(1);
    }
    return 0;
}

void kqemu_cpu_interrupt(CPUState *env)
{
#if defined(_WIN32)
    /* cancelling the I/O request causes KQEMU to finish executing the
       current block and to return successfully. */
    CancelIo(kqemu_fd);
#endif
}

/*
   QEMU paravirtualization interface. The current interface only allows
   the IF and IOPL flags to be modified while running under kqemu.

   At this point it is not very satisfactory. I leave it for reference
   as it adds little complexity.
*/

#define QPI_COMM_PAGE_PHYS_ADDR 0xff000000
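
/* A minimal guest-side sketch (illustrative only, not part of QEMU):
   the guest maps the communication page at QPI_COMM_PAGE_PHYS_ADDR and
   can then toggle IF without a privileged instruction:

       volatile uint32_t *qpi = map_phys(QPI_COMM_PAGE_PHYS_ADDR);
       uint32_t flags = *qpi;          // read current IF/IOPL
       *qpi = flags & ~IF_MASK;        // "cli" replacement
       // then force an exit from the current TB, e.g. with pushf/popf

   'map_phys' is a placeholder for whatever physical-mapping primitive
   the guest OS provides. */
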
static uint32_t qpi_mem_readb(void *opaque, hwaddr addr)
{
    return 0;
}

static uint32_t qpi_mem_readw(void *opaque, hwaddr addr)
{
    return 0;
}

static void qpi_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
{
}

static void qpi_mem_writew(void *opaque, hwaddr addr, uint32_t val)
{
}

static uint32_t qpi_mem_readl(void *opaque, hwaddr addr)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return 0;
    return env->eflags & (IF_MASK | IOPL_MASK);
}

/* Note: after writing to this address, the guest code must make sure
   it is exiting the current TB. pushf/popf can be used for that
   purpose. */
static void qpi_mem_writel(void *opaque, hwaddr addr, uint32_t val)
{
    CPUState *env;

    env = cpu_single_env;
    if (!env)
        return;
    env->eflags = (env->eflags & ~(IF_MASK | IOPL_MASK)) |
        (val & (IF_MASK | IOPL_MASK));
}

static CPUReadMemoryFunc * const qpi_mem_read[3] = {
    qpi_mem_readb,
    qpi_mem_readw,
    qpi_mem_readl,
};

static CPUWriteMemoryFunc * const qpi_mem_write[3] = {
    qpi_mem_writeb,
    qpi_mem_writew,
    qpi_mem_writel,
};
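
/* qpi_init() below installs these handlers on a single page at
   0xff000000; kqemu_set_phys_mem() recognizes the resulting io_index and
   reports that page to the kernel module as KQEMU_IO_MEM_COMM. */
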
static void qpi_init(void)
{
    kqemu_comm_base = 0xff000000 | 1;
    qpi_io_memory = cpu_register_io_memory(
        qpi_mem_read,
        qpi_mem_write, NULL);
    cpu_register_physical_memory(kqemu_comm_base & ~0xfff,
                                 0x1000, qpi_io_memory);
}
#endif