hppa: specify target_phys_bits in configure script
[qemu/hppa.git] / cpu-exec.c
blob a47c06793cb1ddc36944f05f1c77f7fe3f47c829
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
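
/* tcg_qemu_tb_exec returns with the low two bits of next_tb used as a
   tag: 0 or 1 name the jump slot of the TB we just left (consumed by the
   TB chaining code in cpu_exec below), while 2 marks an early exit such
   as an icount expiry; the remaining bits still point at the TB itself,
   so the guest PC can be recovered from it as above. */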

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
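
/* TB lookup is two-level: tb_find_fast below first probes the per-CPU
   tb_jmp_cache, a direct-mapped table indexed by virtual PC, and only on
   a miss falls back to tb_find_slow above, which walks the physically
   indexed hash chain and retranslates if nothing matches. */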

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
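
/* Note that pc alone does not identify a translation: the same guest
   address can decode differently depending on mode state (e.g. CPL or
   segment bases on i386), which is why cs_base and flags take part in
   the match above. */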

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_HPPA)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
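            /* The global register holding env can be mangled across
               setjmp/longjmp on sparc hosts (see the glibc workaround at
               the top of this file), so reload it from cpu_single_env
               each time we come back here. */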
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_HPPA)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_HPPA) || defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_HPPA)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->psw & PSW_I)) {
                        env->exception_index = EXCP_EXTINT;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_HPPA)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
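                /* Chaining: next_tb still carries the TB we just left,
                   with the taken jump slot (0 or 1) encoded in its low
                   two bits; tb_add_jump patches that slot so future exits
                   jump straight to the new TB without returning here. */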
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
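                    /* Icount handling: with instruction counting enabled,
                       the generated code decrements icount_decr.u16.low
                       and exits with tag 2 in next_tb once the budget is
                       exhausted; the budget is then refilled from
                       icount_extra below, or the remainder is executed
                       uncached via cpu_exec_nocache. */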
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_HPPA)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
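
/* Every handle_cpu_signal variant below follows the same protocol:
   return 0 if the fault was not ours (the host signal should be handled
   normally), return 1 once the fault has been dealt with, and do not
   return at all when a guest exception is raised via cpu_loop_exit or
   cpu_resume_from_signal. */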

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_HPPA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_hppa_handle_mmu_fault(env, address, is_write, 1, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
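
/* Host-side handlers: each cpu_signal_handler below extracts the
   faulting host PC, the faulting address and, where the host reports it,
   whether the access was a write from the host signal context, then
   defers to handle_cpu_signal above. */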

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
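
/* Trap 0xe is the x86 page fault; bit 1 of the page-fault error code is
   the W/R bit, so (ERROR_sig(uc) >> 1) & 1 recovers is_write directly
   from the hardware error code. The x86_64 variant below does the
   same. */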

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context) /* Count register */
# define XER_sig(context)            REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context) /* Count register */
# define XER_sig(context)                REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context) /* Link register */
# define CR_sig(context)                 REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
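
/* Neither alpha nor sparc hosts report a write flag in the signal
   context, so the two handlers above recover it by decoding the opcode
   of the faulting instruction: known store opcodes set is_write,
   everything else is treated as a read. */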

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */