/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
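
/* Set by tb_gen_code() when the translation cache had to be flushed while
   generating code; the main loop tests it before chaining TBs so that a
   direct jump is never patched into a block that may have been freed. */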
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
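
/* Slow-path TB lookup: walk the tb_phys_hash chain. The hash is keyed on
   the physical PC, so every virtual mapping of the same code page shares
   one translation; a block spanning two pages must also match on the
   physical address of the second page. */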
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
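
/* Fast-path TB lookup: a direct-mapped cache indexed by the virtual PC.
   On a miss, or when cs_base/flags do not match, fall back to
   tb_find_slow(), which also refills the cache entry. */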
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
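
/* Overall structure: the outer for(;;) is re-entered through setjmp()
   every time a longjmp() unwinds out of translated code (cpu_loop_exit());
   pending exceptions and interrupts are serviced at the top of that loop,
   then the inner for(;;) repeatedly looks up, chains and runs translation
   blocks until an exception or interrupt forces an exit. */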
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
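            /* next_tb carries the host address of the TB that just ran,
               with status packed into its two low bits (see the
               (next_tb & 3) tests below); tb_add_jump() uses it to patch
               a direct jump from that block to its successor. Zero means
               "do not chain". */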
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
            if ((loglevel & CPU_LOG_TB_CPU)) {
                /* restore flags in standard format */
                regs_to_env();
#if defined(TARGET_I386)
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                cpu_m68k_flush_flags(env, env->cc_op);
                env->cc_op = CC_OP_FLAGS;
                env->sr = (env->sr & 0xffe0)
                          | env->cc_dest | (env->cc_x << 4);
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
            }
#endif
            spin_lock(&tb_lock);
            tb = tb_find_fast();
            /* Note: we do it here to avoid a gcc bug on Mac OS X when
               doing it in tb_find_slow */
            if (tb_invalidated_flag) {
                /* as some TB could have been invalidated because
                   of memory exceptions while generating the code, we
                   must recompute the hash index here */
                next_tb = 0;
                tb_invalidated_flag = 0;
            }
#ifdef DEBUG_EXEC
            if ((loglevel & CPU_LOG_EXEC)) {
                fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                        (long)tb->tc_ptr, tb->pc,
                        lookup_symbol(tb->pc));
            }
#endif
            /* see if we can patch the calling TB. When the TB
               spans two pages, we cannot safely do a direct
               jump. */
            if (next_tb != 0 &&
#ifdef USE_KQEMU
                (env->kqemu_enabled != 2) &&
#endif
                tb->page_addr[1] == -1) {
                tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
            }
            spin_unlock(&tb_lock);
            env->current_tb = tb;

            /* cpu_interrupt might be called while translating the
               TB, but before it is linked into a potentially
               infinite loop and becomes env->current_tb. Avoid
               starting execution if there is a pending interrupt. */
            if (unlikely(env->interrupt_request & CPU_INTERRUPT_EXIT))
                env->current_tb = NULL;

            while (env->current_tb) {
                tc_ptr = tb->tc_ptr;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
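                /* Low bits == 2 means the instruction-count budget ran
                   out mid-block: the generated code consumes the 16-bit
                   counter icount_decr.u16.low, while icount_extra banks
                   the instructions that do not fit in 16 bits. Either
                   refill the decrementer, or run the few remaining
                   instructions with cpu_exec_nocache() and raise
                   EXCP_INTERRUPT. */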
                if ((next_tb & 3) == 2) {
                    /* Instruction counter expired.  */
                    int insns_left;
                    tb = (TranslationBlock *)(long)(next_tb & ~3);
                    /* Restore PC.  */
                    cpu_pc_from_tb(env, tb);
                    insns_left = env->icount_decr.u32;
                    if (env->icount_extra && insns_left >= 0) {
                        /* Refill decrementer and continue execution.  */
                        env->icount_extra += insns_left;
                        if (env->icount_extra > 0xffff) {
                            insns_left = 0xffff;
                        } else {
                            insns_left = env->icount_extra;
                        }
                        env->icount_extra -= insns_left;
                        env->icount_decr.u16.low = insns_left;
                    } else {
                        if (insns_left > 0) {
                            /* Execute remaining instructions.  */
                            cpu_exec_nocache(insns_left, tb);
                        }
                        env->exception_index = EXCP_INTERRUPT;
                        next_tb = 0;
                        cpu_loop_exit();
                    }
                }
            }
            /* reset soft MMU for next block (it can currently
               only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
            if (kqemu_is_ok(env) &&
                (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                cpu_loop_exit();
            }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
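
/* Without a soft MMU, guest memory accesses are ordinary host loads and
   stores, so guest faults arrive as host SIGSEGV/SIGBUS. The per-target
   handle_cpu_signal() variants below try, in order: unprotecting a page
   that was write-protected to detect self-modifying code, turning the
   access into a guest MMU fault, and finally delivering a real guest CPU
   fault at the PC recovered from the translated code. */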
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
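
/* Host-side dispatch: each cpu_signal_handler() variant below extracts the
   faulting PC, the read/write direction and the saved signal mask from the
   host-specific signal context and forwards them to handle_cpu_signal().
   On hosts where the context does not record the access direction,
   is_write is conservatively left at 0. */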

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR    _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */