qemu-kvm: main_loop_wait now takes blocking/nonblocking argument
[qemu-kvm/amd-iommu.git] / cpu-exec.c
blob9d47fbb5196f0a7749ae656046b2c224f7e8fea3
1 /*
2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "exec.h"
21 #include "disas.h"
22 #if !defined(TARGET_IA64)
23 #include "tcg.h"
24 #endif
25 #include "kvm.h"
27 #if !defined(CONFIG_SOFTMMU)
28 #undef EAX
29 #undef ECX
30 #undef EDX
31 #undef EBX
32 #undef ESP
33 #undef EBP
34 #undef ESI
35 #undef EDI
36 #undef EIP
37 #include <signal.h>
38 #ifdef __linux__
39 #include <sys/ucontext.h>
40 #endif
41 #endif
43 #include "qemu-kvm.h"
45 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
46 // Work around ugly bugs in glibc that mangle global register contents
47 #undef env
48 #define env cpu_single_env
49 #endif
51 int tb_invalidated_flag;
53 //#define CONFIG_DEBUG_EXEC
54 //#define DEBUG_SIGNAL
56 int qemu_cpu_has_work(CPUState *env)
58 return cpu_has_work(env);
61 void cpu_loop_exit(void)
63 env->current_tb = NULL;
64 longjmp(env->jmp_env, 1);
67 /* exit the current TB from a signal handler. The host registers are
68 restored in a state compatible with the CPU emulator
70 void cpu_resume_from_signal(CPUState *env1, void *puc)
72 #if !defined(CONFIG_SOFTMMU)
73 #ifdef __linux__
74 struct ucontext *uc = puc;
75 #elif defined(__OpenBSD__)
76 struct sigcontext *uc = puc;
77 #endif
78 #endif
80 env = env1;
82 /* XXX: restore cpu registers saved in host registers */
84 #if !defined(CONFIG_SOFTMMU)
85 if (puc) {
86 /* XXX: use siglongjmp ? */
87 #ifdef __linux__
88 #ifdef __ia64
89 sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
90 #else
91 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
92 #endif
93 #elif defined(__OpenBSD__)
94 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
95 #endif
97 #endif
98 env->exception_index = -1;
99 longjmp(env->jmp_env, 1);
102 /* Execute the code without caching the generated code. An interpreter
103 could be used if available. */
104 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
106 unsigned long next_tb;
107 TranslationBlock *tb;
109 /* Should never happen.
110 We only end up here when an existing TB is too long. */
111 if (max_cycles > CF_COUNT_MASK)
112 max_cycles = CF_COUNT_MASK;
114 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
115 max_cycles);
116 env->current_tb = tb;
117 /* execute the generated code */
118 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
119 env->current_tb = NULL;
121 if ((next_tb & 3) == 2) {
122 /* Restore PC. This may happen if async event occurs before
123 the TB starts executing. */
124 cpu_pc_from_tb(env, tb);
126 tb_phys_invalidate(tb, -1);
127 tb_free(tb);
130 static TranslationBlock *tb_find_slow(target_ulong pc,
131 target_ulong cs_base,
132 uint64_t flags)
134 TranslationBlock *tb, **ptb1;
135 unsigned int h;
136 tb_page_addr_t phys_pc, phys_page1, phys_page2;
137 target_ulong virt_page2;
139 tb_invalidated_flag = 0;
141 /* find translated block using physical mappings */
142 phys_pc = get_page_addr_code(env, pc);
143 phys_page1 = phys_pc & TARGET_PAGE_MASK;
144 phys_page2 = -1;
145 h = tb_phys_hash_func(phys_pc);
146 ptb1 = &tb_phys_hash[h];
147 for(;;) {
148 tb = *ptb1;
149 if (!tb)
150 goto not_found;
151 if (tb->pc == pc &&
152 tb->page_addr[0] == phys_page1 &&
153 tb->cs_base == cs_base &&
154 tb->flags == flags) {
155 /* check next page if needed */
156 if (tb->page_addr[1] != -1) {
157 virt_page2 = (pc & TARGET_PAGE_MASK) +
158 TARGET_PAGE_SIZE;
159 phys_page2 = get_page_addr_code(env, virt_page2);
160 if (tb->page_addr[1] == phys_page2)
161 goto found;
162 } else {
163 goto found;
166 ptb1 = &tb->phys_hash_next;
168 not_found:
169 /* if no translated code available, then translate it now */
170 tb = tb_gen_code(env, pc, cs_base, flags, 0);
172 found:
173 /* we add the TB in the virtual pc hash table */
174 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
175 return tb;
178 static inline TranslationBlock *tb_find_fast(void)
180 TranslationBlock *tb;
181 target_ulong cs_base, pc;
182 int flags;
184 /* we record a subset of the CPU state. It will
185 always be the same before a given translated block
186 is executed. */
187 cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
188 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
189 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
190 tb->flags != flags)) {
191 tb = tb_find_slow(pc, cs_base, flags);
193 return tb;
196 static CPUDebugExcpHandler *debug_excp_handler;
198 CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
200 CPUDebugExcpHandler *old_handler = debug_excp_handler;
202 debug_excp_handler = handler;
203 return old_handler;
206 static void cpu_handle_debug_exception(CPUState *env)
208 CPUWatchpoint *wp;
210 if (!env->watchpoint_hit)
211 QTAILQ_FOREACH(wp, &env->watchpoints, entry)
212 wp->flags &= ~BP_WATCHPOINT_HIT;
214 if (debug_excp_handler)
215 debug_excp_handler(env);
218 /* main execution loop */
/* Main TCG/KVM execution loop for one virtual CPU.  Saves the global
   'env' register, enters a setjmp-protected outer loop that delivers
   pending exceptions and interrupts, then an inner loop that looks up
   (or translates) the next TranslationBlock, chains it to the previous
   one when safe, and executes it.  Returns the exception/exit code.
   NOTE(review): this text is a gitweb scrape — each line below carries
   its original source line number, and brace-only/blank lines were
   dropped by the extraction; the code tokens are kept byte-identical. */
220 int cpu_exec(CPUState *env1)
222     volatile host_reg_t saved_env_reg;
223     int ret, interrupt_request;
224     TranslationBlock *tb;
225     uint8_t *tc_ptr;
226     unsigned long next_tb;
228     if (cpu_halted(env1) == EXCP_HALTED)
229         return EXCP_HALTED;
231     cpu_single_env = env1;
233     /* the access to env below is actually saving the global register's
234        value, so that files not including target-xyz/exec.h are free to
235        use it. */
236     QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
237     saved_env_reg = (host_reg_t) env;
238     asm("");
239     env = env1;
241 #if defined(TARGET_I386)
242     if (!kvm_enabled()) {
243         /* put eflags in CPU temporary format */
244         CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
245         DF = 1 - (2 * ((env->eflags >> 10) & 1));
246         CC_OP = CC_OP_EFLAGS;
247         env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
249 #elif defined(TARGET_SPARC)
250 #elif defined(TARGET_M68K)
251     env->cc_op = CC_OP_FLAGS;
252     env->cc_dest = env->sr & 0xf;
253     env->cc_x = (env->sr >> 4) & 1;
254 #elif defined(TARGET_ALPHA)
255 #elif defined(TARGET_ARM)
256 #elif defined(TARGET_PPC)
257 #elif defined(TARGET_MICROBLAZE)
258 #elif defined(TARGET_MIPS)
259 #elif defined(TARGET_SH4)
260 #elif defined(TARGET_CRIS)
261 #elif defined(TARGET_S390X)
262 #elif defined(TARGET_IA64)
263     /* XXXXX */
264 #else
265 #error unsupported target CPU
266 #endif
267     env->exception_index = -1;
269     /* prepare setjmp context for exception handling */
270     for(;;) {
271         if (setjmp(env->jmp_env) == 0) {
272 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
273 #undef env
274                     env = cpu_single_env;
275 #define env cpu_single_env
276 #endif
277             /* if an exception is pending, we execute it here */
278             if (env->exception_index >= 0) {
279                 if (env->exception_index >= EXCP_INTERRUPT) {
280                     /* exit request from the cpu execution loop */
281                     ret = env->exception_index;
282                     if (ret == EXCP_DEBUG)
283                         cpu_handle_debug_exception(env);
284                     break;
285                 } else {
286 #if defined(CONFIG_USER_ONLY)
287                     /* if user mode only, we simulate a fake exception
288                        which will be handled outside the cpu execution
289                        loop */
290 #if defined(TARGET_I386)
291                     do_interrupt_user(env->exception_index,
292                                       env->exception_is_int,
293                                       env->error_code,
294                                       env->exception_next_eip);
295                     /* successfully delivered */
296                     env->old_exception = -1;
297 #endif
298                     ret = env->exception_index;
299                     break;
300 #else
301 #if defined(TARGET_I386)
302                     /* simulate a real cpu exception. On i386, it can
303                        trigger new exceptions, but we do not handle
304                        double or triple faults yet. */
305                     do_interrupt(env->exception_index,
306                                  env->exception_is_int,
307                                  env->error_code,
308                                  env->exception_next_eip, 0);
309                     /* successfully delivered */
310                     env->old_exception = -1;
311 #elif defined(TARGET_PPC)
312                     do_interrupt(env);
313 #elif defined(TARGET_MICROBLAZE)
314                     do_interrupt(env);
315 #elif defined(TARGET_MIPS)
316                     do_interrupt(env);
317 #elif defined(TARGET_SPARC)
318                     do_interrupt(env);
319 #elif defined(TARGET_ARM)
320                     do_interrupt(env);
321 #elif defined(TARGET_SH4)
322                     do_interrupt(env);
323 #elif defined(TARGET_ALPHA)
324                     do_interrupt(env);
325 #elif defined(TARGET_CRIS)
326                     do_interrupt(env);
327 #elif defined(TARGET_M68K)
328                     do_interrupt(0);
329 #elif defined(TARGET_IA64)
330                     do_interrupt(env);
331 #endif
332                     env->exception_index = -1;
333 #endif
            /* When KVM is active the guest executes in-kernel;
               kvm_cpu_exec() runs it and we re-enter the setjmp
               point, bypassing the TCG translation loop below. */
337             if (kvm_enabled()) {
338                 kvm_cpu_exec(env);
339                 longjmp(env->jmp_env, 1);
            /* Inner loop: service interrupts, then find/translate and
               execute one TB per iteration, chaining TBs when safe. */
342             next_tb = 0; /* force lookup of first TB */
343             for(;;) {
344                 interrupt_request = env->interrupt_request;
345                 if (unlikely(interrupt_request)) {
346                     if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
347                         /* Mask out external interrupts for this step. */
348                         interrupt_request &= ~(CPU_INTERRUPT_HARD |
349                                                CPU_INTERRUPT_FIQ |
350                                                CPU_INTERRUPT_SMI |
351                                                CPU_INTERRUPT_NMI);
353                     if (interrupt_request & CPU_INTERRUPT_DEBUG) {
354                         env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
355                         env->exception_index = EXCP_DEBUG;
356                         cpu_loop_exit();
358 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
359     defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
360     defined(TARGET_MICROBLAZE)
361                     if (interrupt_request & CPU_INTERRUPT_HALT) {
362                         env->interrupt_request &= ~CPU_INTERRUPT_HALT;
363                         env->halted = 1;
364                         env->exception_index = EXCP_HLT;
365                         cpu_loop_exit();
367 #endif
368 #if defined(TARGET_I386)
369                     if (interrupt_request & CPU_INTERRUPT_INIT) {
370                             svm_check_intercept(SVM_EXIT_INIT);
371                             do_cpu_init(env);
372                             env->exception_index = EXCP_HALTED;
373                             cpu_loop_exit();
374                     } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
375                             do_cpu_sipi(env);
376                     } else if (env->hflags2 & HF2_GIF_MASK) {
377                         if ((interrupt_request & CPU_INTERRUPT_SMI) &&
378                             !(env->hflags & HF_SMM_MASK)) {
379                             svm_check_intercept(SVM_EXIT_SMI);
380                             env->interrupt_request &= ~CPU_INTERRUPT_SMI;
381                             do_smm_enter();
382                             next_tb = 0;
383                         } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
384                                    !(env->hflags2 & HF2_NMI_MASK)) {
385                             env->interrupt_request &= ~CPU_INTERRUPT_NMI;
386                             env->hflags2 |= HF2_NMI_MASK;
387                             do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
388                             next_tb = 0;
389                         } else if (interrupt_request & CPU_INTERRUPT_MCE) {
390                             env->interrupt_request &= ~CPU_INTERRUPT_MCE;
391                             do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
392                             next_tb = 0;
393                         } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
394                                    (((env->hflags2 & HF2_VINTR_MASK) &&
395                                      (env->hflags2 & HF2_HIF_MASK)) ||
396                                     (!(env->hflags2 & HF2_VINTR_MASK) &&
397                                      (env->eflags & IF_MASK &&
398                                       !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
399                             int intno;
400                             svm_check_intercept(SVM_EXIT_INTR);
401                             env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
402                             intno = cpu_get_pic_interrupt(env);
403                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
404 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
405 #undef env
406                     env = cpu_single_env;
407 #define env cpu_single_env
408 #endif
409                             do_interrupt(intno, 0, 0, 0, 1);
410                             /* ensure that no TB jump will be modified as
411                                the program flow was changed */
412                             next_tb = 0;
413 #if !defined(CONFIG_USER_ONLY)
414                         } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
415                                    (env->eflags & IF_MASK) &&
416                                    !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
417                             int intno;
418                             /* FIXME: this should respect TPR */
419                             svm_check_intercept(SVM_EXIT_VINTR);
420                             intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
421                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
422                             do_interrupt(intno, 0, 0, 0, 1);
423                             env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
424                             next_tb = 0;
425 #endif
428 #elif defined(TARGET_PPC)
429 #if 0
430                     if ((interrupt_request & CPU_INTERRUPT_RESET)) {
431                         cpu_reset(env);
433 #endif
434                     if (interrupt_request & CPU_INTERRUPT_HARD) {
435                         ppc_hw_interrupt(env);
436                         if (env->pending_interrupts == 0)
437                             env->interrupt_request &= ~CPU_INTERRUPT_HARD;
438                         next_tb = 0;
440 #elif defined(TARGET_MICROBLAZE)
441                     if ((interrupt_request & CPU_INTERRUPT_HARD)
442                         && (env->sregs[SR_MSR] & MSR_IE)
443                         && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
444                         && !(env->iflags & (D_FLAG | IMM_FLAG))) {
445                         env->exception_index = EXCP_IRQ;
446                         do_interrupt(env);
447                         next_tb = 0;
449 #elif defined(TARGET_MIPS)
450                     if ((interrupt_request & CPU_INTERRUPT_HARD) &&
451                         (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
452                         (env->CP0_Status & (1 << CP0St_IE)) &&
453                         !(env->CP0_Status & (1 << CP0St_EXL)) &&
454                         !(env->CP0_Status & (1 << CP0St_ERL)) &&
455                         !(env->hflags & MIPS_HFLAG_DM)) {
456                         /* Raise it */
457                         env->exception_index = EXCP_EXT_INTERRUPT;
458                         env->error_code = 0;
459                         do_interrupt(env);
460                         next_tb = 0;
462 #elif defined(TARGET_SPARC)
463                     if (interrupt_request & CPU_INTERRUPT_HARD) {
464                         if (cpu_interrupts_enabled(env) &&
465                             env->interrupt_index > 0) {
466                             int pil = env->interrupt_index & 0xf;
467                             int type = env->interrupt_index & 0xf0;
469                             if (((type == TT_EXTINT) &&
470                                   cpu_pil_allowed(env, pil)) ||
471                                   type != TT_EXTINT) {
472                                 env->exception_index = env->interrupt_index;
473                                 do_interrupt(env);
474                                 next_tb = 0;
477                     } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
478                         //do_interrupt(0, 0, 0, 0, 0);
479                         env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
481 #elif defined(TARGET_ARM)
482                     if (interrupt_request & CPU_INTERRUPT_FIQ
483                         && !(env->uncached_cpsr & CPSR_F)) {
484                         env->exception_index = EXCP_FIQ;
485                         do_interrupt(env);
486                         next_tb = 0;
488                     /* ARMv7-M interrupt return works by loading a magic value
489                        into the PC.  On real hardware the load causes the
490                        return to occur.  The qemu implementation performs the
491                        jump normally, then does the exception return when the
492                        CPU tries to execute code at the magic address.
493                        This will cause the magic PC value to be pushed to
494                        the stack if an interrupt occured at the wrong time.
495                        We avoid this by disabling interrupts when
496                        pc contains a magic address.  */
497                     if (interrupt_request & CPU_INTERRUPT_HARD
498                         && ((IS_M(env) && env->regs[15] < 0xfffffff0)
499                             || !(env->uncached_cpsr & CPSR_I))) {
500                         env->exception_index = EXCP_IRQ;
501                         do_interrupt(env);
502                         next_tb = 0;
504 #elif defined(TARGET_SH4)
505                     if (interrupt_request & CPU_INTERRUPT_HARD) {
506                         do_interrupt(env);
507                         next_tb = 0;
509 #elif defined(TARGET_ALPHA)
510                     if (interrupt_request & CPU_INTERRUPT_HARD) {
511                         do_interrupt(env);
512                         next_tb = 0;
514 #elif defined(TARGET_CRIS)
515                     if (interrupt_request & CPU_INTERRUPT_HARD
516                         && (env->pregs[PR_CCS] & I_FLAG)
517                         && !env->locked_irq) {
518                         env->exception_index = EXCP_IRQ;
519                         do_interrupt(env);
520                         next_tb = 0;
522                     if (interrupt_request & CPU_INTERRUPT_NMI
523                         && (env->pregs[PR_CCS] & M_FLAG)) {
524                         env->exception_index = EXCP_NMI;
525                         do_interrupt(env);
526                         next_tb = 0;
528 #elif defined(TARGET_M68K)
529                     if (interrupt_request & CPU_INTERRUPT_HARD
530                         && ((env->sr & SR_I) >> SR_I_SHIFT)
531                             < env->pending_level) {
532                         /* Real hardware gets the interrupt vector via an
533                            IACK cycle at this point.  Current emulated
534                            hardware doesn't rely on this, so we
535                            provide/save the vector when the interrupt is
536                            first signalled.  */
537                         env->exception_index = env->pending_vector;
538                         do_interrupt(1);
539                         next_tb = 0;
541 #endif
542                    /* Don't use the cached interupt_request value,
543                       do_interrupt may have updated the EXITTB flag. */
544                     if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
545                         env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
546                         /* ensure that no TB jump will be modified as
547                            the program flow was changed */
548                         next_tb = 0;
551                 if (unlikely(env->exit_request)) {
552                     env->exit_request = 0;
553                     env->exception_index = EXCP_INTERRUPT;
554                     cpu_loop_exit();
556 #ifdef CONFIG_DEBUG_EXEC
557                 if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
558                     /* restore flags in standard format */
559 #if defined(TARGET_I386)
560                     env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
561                     log_cpu_state(env, X86_DUMP_CCOP);
562                     env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
563 #elif defined(TARGET_ARM)
564                     log_cpu_state(env, 0);
565 #elif defined(TARGET_SPARC)
566                     log_cpu_state(env, 0);
567 #elif defined(TARGET_PPC)
568                     log_cpu_state(env, 0);
569 #elif defined(TARGET_M68K)
570                     cpu_m68k_flush_flags(env, env->cc_op);
571                     env->cc_op = CC_OP_FLAGS;
572                     env->sr = (env->sr & 0xffe0)
573                               | env->cc_dest | (env->cc_x << 4);
574                     log_cpu_state(env, 0);
575 #elif defined(TARGET_MICROBLAZE)
576                     log_cpu_state(env, 0);
577 #elif defined(TARGET_MIPS)
578                     log_cpu_state(env, 0);
579 #elif defined(TARGET_SH4)
580                     log_cpu_state(env, 0);
581 #elif defined(TARGET_ALPHA)
582                     log_cpu_state(env, 0);
583 #elif defined(TARGET_CRIS)
584                     log_cpu_state(env, 0);
585 #else
586 #error unsupported target CPU
587 #endif
589 #endif
590                 spin_lock(&tb_lock);
591                 tb = tb_find_fast();
592                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
593                    doing it in tb_find_slow */
594                 if (tb_invalidated_flag) {
595                     /* as some TB could have been invalidated because
596                        of memory exceptions while generating the code, we
597                        must recompute the hash index here */
598                     next_tb = 0;
599                     tb_invalidated_flag = 0;
601 #ifdef CONFIG_DEBUG_EXEC
602                 qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
603                              (long)tb->tc_ptr, tb->pc,
604                              lookup_symbol(tb->pc));
605 #endif
606                 /* see if we can patch the calling TB. When the TB
607                    spans two pages, we cannot safely do a direct
608                    jump. */
609                 if (next_tb != 0 && tb->page_addr[1] == -1) {
610                     tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
612                 spin_unlock(&tb_lock);
614                 /* cpu_interrupt might be called while translating the
615                    TB, but before it is linked into a potentially
616                    infinite loop and becomes env->current_tb. Avoid
617                    starting execution if there is a pending interrupt. */
618                 if (!unlikely (env->exit_request)) {
619                     env->current_tb = tb;
620                     tc_ptr = tb->tc_ptr;
621                 /* execute the generated code */
622 #if defined(__sparc__) && !defined(CONFIG_SOLARIS)
623 #undef env
624                     env = cpu_single_env;
625 #define env cpu_single_env
626 #endif
627                     next_tb = tcg_qemu_tb_exec(tc_ptr);
628                     env->current_tb = NULL;
629                     if ((next_tb & 3) == 2) {
630                         /* Instruction counter expired.  */
631                         int insns_left;
632                         tb = (TranslationBlock *)(long)(next_tb & ~3);
633                         /* Restore PC.  */
634                         cpu_pc_from_tb(env, tb);
635                         insns_left = env->icount_decr.u32;
636                         if (env->icount_extra && insns_left >= 0) {
637                             /* Refill decrementer and continue execution.  */
638                             env->icount_extra += insns_left;
639                             if (env->icount_extra > 0xffff) {
640                                 insns_left = 0xffff;
641                             } else {
642                                 insns_left = env->icount_extra;
644                             env->icount_extra -= insns_left;
645                             env->icount_decr.u16.low = insns_left;
646                         } else {
647                             if (insns_left > 0) {
648                                 /* Execute remaining instructions.  */
649                                 cpu_exec_nocache(insns_left, tb);
651                             env->exception_index = EXCP_INTERRUPT;
652                             next_tb = 0;
653                             cpu_loop_exit();
657                 /* reset soft MMU for next block (it can currently
658                    only be set by a memory fault) */
659             } /* for(;;) */
661     } /* for(;;) */
664 #if defined(TARGET_I386)
665     /* restore flags in standard format */
666     env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
667 #elif defined(TARGET_ARM)
668     /* XXX: Save/restore host fpu exception state?.  */
669 #elif defined(TARGET_SPARC)
670 #elif defined(TARGET_PPC)
671 #elif defined(TARGET_M68K)
672     cpu_m68k_flush_flags(env, env->cc_op);
673     env->cc_op = CC_OP_FLAGS;
674     env->sr = (env->sr & 0xffe0)
675               | env->cc_dest | (env->cc_x << 4);
676 #elif defined(TARGET_MICROBLAZE)
677 #elif defined(TARGET_MIPS)
678 #elif defined(TARGET_SH4)
679 #elif defined(TARGET_IA64)
680 #elif defined(TARGET_ALPHA)
681 #elif defined(TARGET_CRIS)
682 #elif defined(TARGET_S390X)
683     /* XXXXX */
684 #else
685 #error unsupported target CPU
686 #endif
688     /* restore global registers */
689     asm("");
690     env = (void *) saved_env_reg;
692     /* fail safe : never use cpu_single_env outside cpu_exec() */
693     cpu_single_env = NULL;
694     return ret;
697 /* must only be called from the generated code as an exception can be
698 generated */
699 void tb_invalidate_page_range(target_ulong start, target_ulong end)
701 /* XXX: cannot enable it yet because it yields to MMU exception
702 where NIP != read address on PowerPC */
703 #if 0
704 target_ulong phys_addr;
705 phys_addr = get_phys_addr_code(env, start);
706 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
707 #endif
710 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
712 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
714 CPUX86State *saved_env;
716 saved_env = env;
717 env = s;
718 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
719 selector &= 0xffff;
720 cpu_x86_load_seg_cache(env, seg_reg, selector,
721 (selector << 4), 0xffff, 0);
722 } else {
723 helper_load_seg(seg_reg, selector);
725 env = saved_env;
728 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
730 CPUX86State *saved_env;
732 saved_env = env;
733 env = s;
735 helper_fsave(ptr, data32);
737 env = saved_env;
740 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
742 CPUX86State *saved_env;
744 saved_env = env;
745 env = s;
747 helper_frstor(ptr, data32);
749 env = saved_env;
752 #endif /* TARGET_I386 */
754 #if !defined(CONFIG_SOFTMMU)
756 #if defined(TARGET_I386)
757 #define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
758 #else
759 #define EXCEPTION_ACTION cpu_loop_exit()
760 #endif
762 /* 'pc' is the host PC at which the exception was raised. 'address' is
763 the effective address of the memory exception. 'is_write' is 1 if a
764 write caused the exception and otherwise 0'. 'old_set' is the
765 signal set which should be restored */
766 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
767 int is_write, sigset_t *old_set,
768 void *puc)
770 TranslationBlock *tb;
771 int ret;
773 if (cpu_single_env)
774 env = cpu_single_env; /* XXX: find a correct solution for multithread */
775 #if defined(DEBUG_SIGNAL)
776 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
777 pc, address, is_write, *(unsigned long *)old_set);
778 #endif
779 /* XXX: locking issue */
780 if (is_write && page_unprotect(h2g(address), pc, puc)) {
781 return 1;
784 /* see if it is an MMU fault */
785 ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
786 if (ret < 0)
787 return 0; /* not an MMU fault */
788 if (ret == 0)
789 return 1; /* the MMU fault was handled without causing real CPU fault */
790 /* now we have a real cpu fault */
791 tb = tb_find_pc(pc);
792 if (tb) {
793 /* the PC is inside the translated code. It means that we have
794 a virtual CPU fault */
795 cpu_restore_state(tb, env, pc, puc);
798 /* we restore the process signal mask as the sigreturn should
799 do it (XXX: use sigsetjmp) */
800 sigprocmask(SIG_SETMASK, old_set, NULL);
801 EXCEPTION_ACTION;
803 /* never comes here */
804 return 1;
807 #if defined(__i386__)
809 #if defined(__APPLE__)
810 # include <sys/ucontext.h>
812 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
813 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
814 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
815 # define MASK_sig(context) ((context)->uc_sigmask)
816 #elif defined (__NetBSD__)
817 # include <ucontext.h>
819 # define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP])
820 # define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
821 # define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
822 # define MASK_sig(context) ((context)->uc_sigmask)
823 #elif defined (__FreeBSD__) || defined(__DragonFly__)
824 # include <ucontext.h>
826 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
827 # define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
828 # define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
829 # define MASK_sig(context) ((context)->uc_sigmask)
830 #elif defined(__OpenBSD__)
831 # define EIP_sig(context) ((context)->sc_eip)
832 # define TRAP_sig(context) ((context)->sc_trapno)
833 # define ERROR_sig(context) ((context)->sc_err)
834 # define MASK_sig(context) ((context)->sc_mask)
835 #else
836 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
837 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
838 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
839 # define MASK_sig(context) ((context)->uc_sigmask)
840 #endif
842 int cpu_signal_handler(int host_signum, void *pinfo,
843 void *puc)
845 siginfo_t *info = pinfo;
846 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
847 ucontext_t *uc = puc;
848 #elif defined(__OpenBSD__)
849 struct sigcontext *uc = puc;
850 #else
851 struct ucontext *uc = puc;
852 #endif
853 unsigned long pc;
854 int trapno;
856 #ifndef REG_EIP
857 /* for glibc 2.1 */
858 #define REG_EIP EIP
859 #define REG_ERR ERR
860 #define REG_TRAPNO TRAPNO
861 #endif
862 pc = EIP_sig(uc);
863 trapno = TRAP_sig(uc);
864 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
865 trapno == 0xe ?
866 (ERROR_sig(uc) >> 1) & 1 : 0,
867 &MASK_sig(uc), puc);
870 #elif defined(__x86_64__)
872 #ifdef __NetBSD__
873 #define PC_sig(context) _UC_MACHINE_PC(context)
874 #define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
875 #define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
876 #define MASK_sig(context) ((context)->uc_sigmask)
877 #elif defined(__OpenBSD__)
878 #define PC_sig(context) ((context)->sc_rip)
879 #define TRAP_sig(context) ((context)->sc_trapno)
880 #define ERROR_sig(context) ((context)->sc_err)
881 #define MASK_sig(context) ((context)->sc_mask)
882 #elif defined (__FreeBSD__) || defined(__DragonFly__)
883 #include <ucontext.h>
885 #define PC_sig(context) (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
886 #define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno)
887 #define ERROR_sig(context) ((context)->uc_mcontext.mc_err)
888 #define MASK_sig(context) ((context)->uc_sigmask)
889 #else
890 #define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP])
891 #define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
892 #define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
893 #define MASK_sig(context) ((context)->uc_sigmask)
894 #endif
896 int cpu_signal_handler(int host_signum, void *pinfo,
897 void *puc)
899 siginfo_t *info = pinfo;
900 unsigned long pc;
901 #if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
902 ucontext_t *uc = puc;
903 #elif defined(__OpenBSD__)
904 struct sigcontext *uc = puc;
905 #else
906 struct ucontext *uc = puc;
907 #endif
909 pc = PC_sig(uc);
910 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
911 TRAP_sig(uc) == 0xe ?
912 (ERROR_sig(uc) >> 1) & 1 : 0,
913 &MASK_sig(uc), puc);
916 #elif defined(_ARCH_PPC)
918 /***********************************************************************
919 * signal context platform-specific definitions
920 * From Wine
922 #ifdef linux
923 /* All Registers access - only for local access */
924 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
925 /* Gpr Registers access */
926 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
927 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
928 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
929 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
930 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
931 # define LR_sig(context) REG_sig(link, context) /* Link register */
932 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
933 /* Float Registers access */
934 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
935 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
936 /* Exception Registers access */
937 # define DAR_sig(context) REG_sig(dar, context)
938 # define DSISR_sig(context) REG_sig(dsisr, context)
939 # define TRAP_sig(context) REG_sig(trap, context)
940 #endif /* linux */
942 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
943 #include <ucontext.h>
944 # define IAR_sig(context) ((context)->uc_mcontext.mc_srr0)
945 # define MSR_sig(context) ((context)->uc_mcontext.mc_srr1)
946 # define CTR_sig(context) ((context)->uc_mcontext.mc_ctr)
947 # define XER_sig(context) ((context)->uc_mcontext.mc_xer)
948 # define LR_sig(context) ((context)->uc_mcontext.mc_lr)
949 # define CR_sig(context) ((context)->uc_mcontext.mc_cr)
950 /* Exception Registers access */
951 # define DAR_sig(context) ((context)->uc_mcontext.mc_dar)
952 # define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr)
953 # define TRAP_sig(context) ((context)->uc_mcontext.mc_exc)
954 #endif /* __FreeBSD__|| __FreeBSD_kernel__ */
956 #ifdef __APPLE__
957 # include <sys/ucontext.h>
958 typedef struct ucontext SIGCONTEXT;
959 /* All Registers access - only for local access */
960 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
961 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
962 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
963 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
964 /* Gpr Registers access */
965 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
966 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
967 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
968 # define CTR_sig(context) REG_sig(ctr, context)
969 # define XER_sig(context) REG_sig(xer, context) /* Link register */
970 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
971 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
972 /* Float Registers access */
973 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
974 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
975 /* Exception Registers access */
976 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
977 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
978 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
979 #endif /* __APPLE__ */
981 int cpu_signal_handler(int host_signum, void *pinfo,
982 void *puc)
984 siginfo_t *info = pinfo;
985 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
986 ucontext_t *uc = puc;
987 #else
988 struct ucontext *uc = puc;
989 #endif
990 unsigned long pc;
991 int is_write;
993 pc = IAR_sig(uc);
994 is_write = 0;
995 #if 0
996 /* ppc 4xx case */
997 if (DSISR_sig(uc) & 0x00800000)
998 is_write = 1;
999 #else
1000 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1001 is_write = 1;
1002 #endif
1003 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1004 is_write, &uc->uc_sigmask, puc);
1007 #elif defined(__alpha__)
1009 int cpu_signal_handler(int host_signum, void *pinfo,
1010 void *puc)
1012 siginfo_t *info = pinfo;
1013 struct ucontext *uc = puc;
1014 uint32_t *pc = uc->uc_mcontext.sc_pc;
1015 uint32_t insn = *pc;
1016 int is_write = 0;
1018 /* XXX: need kernel patch to get write flag faster */
1019 switch (insn >> 26) {
1020 case 0x0d: // stw
1021 case 0x0e: // stb
1022 case 0x0f: // stq_u
1023 case 0x24: // stf
1024 case 0x25: // stg
1025 case 0x26: // sts
1026 case 0x27: // stt
1027 case 0x2c: // stl
1028 case 0x2d: // stq
1029 case 0x2e: // stl_c
1030 case 0x2f: // stq_c
1031 is_write = 1;
1034 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1035 is_write, &uc->uc_sigmask, puc);
1037 #elif defined(__sparc__)
/* SIGSEGV handler for sparc hosts.  The PC and signal mask are dug out of
   layout-specific structures: sparc32/Solaris pass raw register words right
   after the siginfo_t, while sparc64 Linux/OpenBSD provide a sigcontext.  */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    /* sparc32: the kernel places the register window right behind siginfo */
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;   /* trap PC */
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {                /* op == 3: load/store format */
        switch((insn >> 19) & 0x3f) {       /* op3 field selects the flavour */
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
1094 #elif defined(__arm__)
1096 int cpu_signal_handler(int host_signum, void *pinfo,
1097 void *puc)
1099 siginfo_t *info = pinfo;
1100 struct ucontext *uc = puc;
1101 unsigned long pc;
1102 int is_write;
1104 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1105 pc = uc->uc_mcontext.gregs[R15];
1106 #else
1107 pc = uc->uc_mcontext.arm_pc;
1108 #endif
1109 /* XXX: compute is_write */
1110 is_write = 0;
1111 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1112 is_write,
1113 &uc->uc_sigmask, puc);
1116 #elif defined(__mc68000)
1118 int cpu_signal_handler(int host_signum, void *pinfo,
1119 void *puc)
1121 siginfo_t *info = pinfo;
1122 struct ucontext *uc = puc;
1123 unsigned long pc;
1124 int is_write;
1126 pc = uc->uc_mcontext.gregs[16];
1127 /* XXX: compute is_write */
1128 is_write = 0;
1129 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1130 is_write,
1131 &uc->uc_sigmask, puc);
1134 #elif defined(__ia64)
1136 #ifndef __ISR_VALID
1137 /* This ought to be in <bits/siginfo.h>... */
1138 # define __ISR_VALID 1
1139 #endif
1141 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1143 siginfo_t *info = pinfo;
1144 struct ucontext *uc = puc;
1145 unsigned long ip;
1146 int is_write = 0;
1148 ip = uc->uc_mcontext.sc_ip;
1149 switch (host_signum) {
1150 case SIGILL:
1151 case SIGFPE:
1152 case SIGSEGV:
1153 case SIGBUS:
1154 case SIGTRAP:
1155 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1156 /* ISR.W (write-access) is bit 33: */
1157 is_write = (info->si_isr >> 33) & 1;
1158 break;
1160 default:
1161 break;
1163 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1164 is_write,
1165 (sigset_t *)&uc->uc_sigmask, puc);
1168 #elif defined(__s390__)
1170 int cpu_signal_handler(int host_signum, void *pinfo,
1171 void *puc)
1173 siginfo_t *info = pinfo;
1174 struct ucontext *uc = puc;
1175 unsigned long pc;
1176 int is_write;
1178 pc = uc->uc_mcontext.psw.addr;
1179 /* XXX: compute is_write */
1180 is_write = 0;
1181 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1182 is_write, &uc->uc_sigmask, puc);
1185 #elif defined(__mips__)
1187 int cpu_signal_handler(int host_signum, void *pinfo,
1188 void *puc)
1190 siginfo_t *info = pinfo;
1191 struct ucontext *uc = puc;
1192 greg_t pc = uc->uc_mcontext.pc;
1193 int is_write;
1195 /* XXX: compute is_write */
1196 is_write = 0;
1197 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1198 is_write, &uc->uc_sigmask, puc);
1201 #elif defined(__hppa__)
1203 int cpu_signal_handler(int host_signum, void *pinfo,
1204 void *puc)
1206 struct siginfo *info = pinfo;
1207 struct ucontext *uc = puc;
1208 unsigned long pc;
1209 int is_write;
1211 pc = uc->uc_mcontext.sc_iaoq[0];
1212 /* FIXME: compute is_write */
1213 is_write = 0;
1214 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1215 is_write,
1216 &uc->uc_sigmask, puc);
1219 #else
1221 #error host CPU specific signal handler needed
1223 #endif
1225 #endif /* !defined(CONFIG_SOFTMMU) */