/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
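
/* Note: since exception_index is reset to -1 before the longjmp, the
   setjmp in cpu_exec() returns non-zero, takes its else-branch and
   simply resumes TB lookup and execution at the current guest PC
   instead of delivering an exception. */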
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
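
/* Note on the value returned by tcg_qemu_tb_exec(): translated code
   hands back the address of the TranslationBlock it exited from, with
   status bits packed into the two low bits (TB pointers are aligned,
   so those bits are otherwise zero).  A low-bit value of 2, tested
   above and in cpu_exec() below, indicates that the exit was forced
   by the instruction counter rather than by reaching the end of the
   block. */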
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
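
/* Note: TB lookup is two-level.  tb_find_fast() probes tb_jmp_cache,
   a direct-mapped cache indexed by a hash of the virtual PC;
   tb_find_slow() then walks tb_phys_hash, which is keyed by the
   physical PC so entries stay valid across changes in the virtual
   address mapping, and finally translates a new block if no match
   exists there either. */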
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
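
/* Example (hypothetical caller): a debugger front end could hook
 * debug exceptions and chain to the previously installed handler:
 *
 *     static CPUDebugExcpHandler *prev_handler;
 *
 *     static void my_debug_handler(CPUState *env)
 *     {
 *         // inspect env->watchpoint_hit, env->watchpoints, ...
 *         if (prev_handler)
 *             prev_handler(env);
 *     }
 *
 *     prev_handler = cpu_set_debug_excp_handler(my_debug_handler);
 */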
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
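    /* Note: the i386 front end keeps condition codes in lazy form:
       CC_SRC/CC_DST hold the operands of the last flag-setting
       operation and CC_OP records which operation it was, so the
       arithmetic flags are only materialized (helper_cc_compute_all())
       when something actually reads eflags.  The direction flag is
       likewise cached in DF as +1/-1 for string operations. */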
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
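                /* Note on TB chaining: next_tb carries the address of
                   the previously executed TB with its low two bits
                   encoding which of that TB's outgoing jump slots was
                   taken; hence (next_tb & ~3) below is the source TB
                   and (next_tb & 3) the slot index.  Once patched, the
                   two blocks branch directly to each other without
                   returning to this loop. */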
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
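                /* Note on the instruction counter: icount_decr is a
                   union whose 16-bit low half is decremented by the
                   translated code; any budget that does not fit in 16
                   bits is parked in icount_extra and moved into the
                   decrementer in slices, which is what the refill path
                   above does. */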
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"
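    /* Note: hostregs_helper.h is included three times in this
       function: with DECLARE_HOST_REGS to declare the save slots,
       with SAVE_HOST_REGS to save the host globals on entry, and
       here, with neither macro defined, to restore them on exit. */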

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
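
/* Note: these user-mode helpers temporarily repoint the global 'env'
   (which the TCG helpers use implicitly) at the caller-supplied CPU
   state and restore it before returning.  In real or VM86 mode the
   segment load above simply sets base = selector << 4. */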
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
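
/* Note: handle_cpu_signal() returns 1 when the fault was consumed by
   the emulator (a write-protected page holding translated code was
   unprotected, or the fault was serviced or raised as a guest
   exception) and 0 when it was not an MMU fault at all, in which
   case the host signal must be handled normally. */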
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
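
/* Note: trap number 0xe is the x86 page-fault vector; bit 1 of the
   hardware-pushed error code distinguishes a faulting write from a
   read, which is how is_write is derived above. */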
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)            ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)             REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                      REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                      REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                      REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                      REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                       REG_sig(link, context)  /* Link register */
# define CR_sig(context)                       REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)           (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)                    (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                      REG_sig(dar, context)
# define DSISR_sig(context)                    REG_sig(dsisr, context)
# define TRAP_sig(context)                     REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)            ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)       ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)       ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)         ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)             REG_sig(r##reg_num, context)
# define IAR_sig(context)                      REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                      REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                      REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                      REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                       REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                       REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)           FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                    ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                      EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)                    EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                     EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
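
/* Note: on PowerPC hosts the store/load distinction comes from the
   DSISR "store operation" bit (0x02000000); instruction storage
   interrupts (trap 0x400) are excluded since they never report a
   data write. */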
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
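
/* Note: with no fault-type information available from the host
   kernel, is_write is recovered by decoding the faulting instruction:
   the Alpha major opcode sits in the top six bits (insn >> 26), and
   every case listed above is a store.  The SPARC handler below uses
   the same trick on its op3 field ((insn >> 19) & 0x3f). */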
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */