/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
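
/* Illustrative note (a sketch, not part of the original source): the value
   returned by tcg_qemu_tb_exec() carries a tag in its low two bits, as the
   uses in this file show; the upper bits are the address of the exiting TB,
   and a tag of 2 is how generated code reports that the instruction counter
   expired.  Roughly:

       next_tb = tcg_qemu_tb_exec(tc_ptr);
       tb = (TranslationBlock *)(long)(next_tb & ~3);  // strip the tag bits
       if ((next_tb & 3) == 2) {
           // icount expired before/inside this TB; resynchronize the PC
       }
*/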
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
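
/* Illustrative note (sketch): TB lookup is two-level. tb_jmp_cache is a
   direct-mapped array indexed by a hash of the virtual PC and answers the
   common case in O(1); on a miss or a CPU-state mismatch, tb_find_slow()
   walks the physically indexed hash chain and, failing that, translates a
   fresh block with tb_gen_code().  Conceptually:

       tb = jmp_cache[hash(pc)];                   // level 1: virtual, direct-mapped
       if (!match(tb, pc, cs_base, flags))
           tb = tb_find_slow(pc, cs_base, flags);  // level 2: physical hash, or translate
*/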
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
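
/* Illustrative example (a sketch, assuming CPUDebugExcpHandler is a function
   type taking a CPUState *): a debugger front end could chain its own handler
   on top of the existing one like this:

       static CPUDebugExcpHandler *prev_handler;

       static void my_debug_excp_handler(CPUState *env)
       {
           // inspect env->watchpoint_hit / env->exception_index here,
           // then delegate to whatever handler was installed before us
           if (prev_handler)
               prev_handler(env);
       }

       prev_handler = cpu_set_debug_excp_handler(my_debug_excp_handler);
*/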
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_MMIX)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_MMIX)
                    /* XXX: tempo */
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MMIX)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_MMIX)
                    /* XXX: tempo */
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MMIX)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
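
                /* Illustrative note (sketch): tb_add_jump() patches jump
                   slot (next_tb & 3) of the previously executed TB, at
                   address (next_tb & ~3), to branch straight into the new
                   TB's code; chained blocks then run back to back without
                   returning to this lookup loop until an exit is forced
                   (setting next_tb = 0 above deliberately breaks the chain). */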
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
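
                /* Worked example (illustrative, not original code): with
                   icount_extra == 0x20000 pending and the decrementer at 0,
                   the refill path above sets insns_left = 0xffff (the
                   16-bit decrementer maximum), leaves icount_extra ==
                   0x10001 for later rounds, and resumes execution; only
                   once both are exhausted does the else branch finish the
                   remaining instructions via cpu_exec_nocache() and exit
                   with EXCP_INTERRUPT. */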
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_MMIX)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
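
/* Illustrative sketch (an assumption about the callers, not part of this
   file): the system-mode main loop and the user-mode cpu loops drive
   cpu_exec() roughly as:

       for (;;) {
           int trapnr = cpu_exec(env);
           switch (trapnr) {
           case EXCP_DEBUG:     // breakpoint/watchpoint/single-step hit
           case EXCP_HALTED:    // CPU halted, wait for an interrupt
           case EXCP_INTERRUPT: // asynchronous exit was requested
           default:
               break;           // handle the exit reason, then loop again
           }
       }
*/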
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
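
/* Illustrative sketch (not part of this file): the user-mode setup code is
   expected to route host SIGSEGV/SIGBUS into cpu_signal_handler() with a
   SA_SIGINFO handler, roughly like:

       static void host_segv_handler(int sig, siginfo_t *info, void *puc)
       {
           if (!cpu_signal_handler(sig, info, puc))
               abort(); // a real fault inside QEMU itself
       }

       struct sigaction act;
       memset(&act, 0, sizeof(act));
       act.sa_sigaction = host_segv_handler;
       act.sa_flags = SA_SIGINFO;
       sigaction(SIGSEGV, &act, NULL);
*/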
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
# define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(link, context) /* Link register */
# define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)			REG_sig(dar, context)
# define DSISR_sig(context)			REG_sig(dsisr, context)
# define TRAP_sig(context)			REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)
# define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)			REG_sig(lr, context)  /* Link register */
# define CR_sig(context)			REG_sig(cr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */