/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
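
/* Note: the longjmp() above lands back at the setjmp() at the top of
   cpu_exec()'s main loop, which then services the pending
   env->exception_index. */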

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
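
/* TB lookup is two-tiered: tb_find_fast() further below probes a small
   direct-mapped cache indexed by the virtual PC, and falls back to
   tb_find_slow(), which walks a hash chain keyed on the physical PC and
   retranslates when no block matches. */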
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    asm("");
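    /* The empty asm() presumably serves as a compiler barrier here,
       keeping the save of the global register variable above (and the
       matching restore at the end of this function) from being
       reordered or optimized away. */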
    env = env1;

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }
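
            /* With KVM enabled, guest code is executed by the kernel's
               KVM module rather than by TCG: hand off here and re-enter
               the setjmp context via longjmp instead of falling through
               to the translation loop below. */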
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
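                /* env->exit_request is the asynchronous "kick" flag set
                   by cpu_exit(); honouring it here turns the kick into
                   an EXCP_INTERRUPT return from cpu_exec(). */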
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
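                /* next_tb packs the address of the previously executed
                   TB with, in its low two bits, the index of the jump
                   slot it exited through; tb_add_jump() uses that pair
                   to patch the previous TB so it branches directly to
                   the new one. */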
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (!unlikely (env->exit_request)) {
                    env->current_tb = tb;
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
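                    /* The generated code decrements the low 16 bits of
                       icount_decr as it runs; icount_extra holds the
                       part of the instruction budget that does not fit
                       in that 16-bit field and is used to refill the
                       decrementer below. */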
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    asm("");
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
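/* A return value of 1 means the fault was resolved here (page
   unprotected, MMU fault handled, or the guest exception path taken);
   0 means it was not a guest MMU fault, in which case the caller must
   let the host's own signal handling take over. */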
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
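    /* Trap 0xe is the x86 page-fault vector; bit 1 of its error code is
       set when the faulting access was a write, which is what the
       (ERROR_sig(uc) >> 1) & 1 expression below extracts. */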
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)       (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)      /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
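    /* On a data fault (trap 0x400 is the instruction storage interrupt,
       where DSISR is not meaningful), DSISR bit 0x02000000 is set when
       the faulting access was a store. */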
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
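    /* The op field (insn bits 31:30) == 3 selects the SPARC load/store
       instruction format; the op3 field (bits 24:19) then distinguishes
       the store flavours tested below. */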
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */