/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "qemu-barrier.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
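
/* Note: in this version of QEMU, 'env' is a global that is normally
   pinned to a fixed host register (declared in exec.h); the workaround
   above temporarily aliases it to cpu_single_env so glibc cannot
   clobber the register copy. */
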
int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    env->current_tb = NULL;
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
#ifdef __ia64
        sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
#else
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#endif
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;
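
    /* The low two bits of next_tb encode how the generated code came
       back: 0 or 1 name the jump slot of the TB that returned control,
       while 2 (in this version) means the instruction counter expired
       before the TB body ran. */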
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    tb_page_addr_t phys_pc, phys_page1, phys_page2;
    target_ulong virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_page_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_page_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
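
/* tb_find_fast() is the common path: a direct-mapped cache indexed by
   virtual PC (env->tb_jmp_cache) is consulted first, and only a miss
   falls back to the physically-indexed hash walk above. */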
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
/* main execution loop */

volatile sig_atomic_t exit_request;

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }
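
    /* On entry each target converts any architectural flag state into
       its lazy TCG-internal representation (e.g. i386 keeps eflags
       split across CC_SRC/CC_OP and DF below). */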
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }
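
            /* The inner loop below executes chained TBs until an
               interrupt, exception or exit request longjmps back to
               the setjmp above. */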
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                 cpu_pil_allowed(env, pil)) ||
                                type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
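
                /* TB chaining: next_tb still identifies the previous TB
                   and, in its low two bits, which of that TB's jump
                   slots brought us back here, so tb_add_jump() above
                   can patch the slot to branch straight into the new
                   TB's code next time. */
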
                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
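                    /* icount bookkeeping: generated code decrements
                       env->icount_decr.u16.low; when it underflows the
                       TB returns early with status 2 so the counter can
                       be refilled from icount_extra below, or the
                       pending event handled. */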
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif
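
/* On i386 the fault is re-injected as a guest exception, complete with
   its error code; every other target simply unwinds back to cpu_exec()
   via cpu_loop_exit(). */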
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored.  Returns 1 if the fault was
   handled by the virtual CPU, 0 if the host must handle it. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
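
/* Host-architecture-specific signal handlers follow.  Each one extracts
   the faulting host PC and, where the host exposes it, a write/read
   flag, then defers to handle_cpu_signal() above. */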
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)         REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                  REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                  REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(link, context) /* Link register */
# define CR_sig(context)                   REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)       (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                  REG_sig(dar, context)
# define DSISR_sig(context)                REG_sig(dsisr, context)
# define TRAP_sig(context)                 REG_sig(trap, context)
#endif /* linux */
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)                  ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)                  ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)                  ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)                  ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                   ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                   ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)                  ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)                ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)                 ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__ || __FreeBSD_kernel__ */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */