[qemu-kvm/fedora.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
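/*
 * Editorial note (inferred from the call sites in this file, not stated
 * in the original sources): tcg_qemu_tb_exec() returns the address of
 * the last executed TranslationBlock with a status code packed into the
 * two low bits, so callers decode it roughly as:
 *
 *     TranslationBlock *last = (TranslationBlock *)(long)(next_tb & ~3);
 *     int status = next_tb & 3;   // 0/1: which jump slot to chain through,
 *                                 // 2: instruction counter expired
 */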
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
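/*
 * Editorial note: TB lookup is two-level. tb_find_fast() probes
 * tb_jmp_cache, a direct-mapped cache indexed by a hash of the virtual
 * PC; on a miss (or when cs_base/flags differ) tb_find_slow() walks the
 * physical-address hash chain and, failing that, translates a fresh
 * block. A hypothetical cold-miss sequence:
 *
 *     tb_find_fast()                 // tb_jmp_cache[hash(pc)] misses
 *       -> tb_find_slow(pc, cs_base, flags)
 *            -> tb_gen_code(env, pc, cs_base, flags, 0)
 *       -> env->tb_jmp_cache[hash(pc)] = tb;   // cached for next time
 */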
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
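
    /*
     * Editorial note on the TARGET_I386 setup above: eflags is kept in a
     * lazy "CPU temporary" format while translated code runs; the
     * arithmetic flags live in CC_SRC/CC_OP, and the direction flag
     * (eflags bit 10) is expanded into DF = +1 or -1 so string
     * instructions can add it directly:
     *
     *     eflags.DF = 0  ->  DF = 1 - 2*0 = +1   (addresses increment)
     *     eflags.DF = 1  ->  DF = 1 - 2*1 = -1   (addresses decrement)
     */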

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
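
            /*
             * Editorial note: when KVM is enabled the guest runs inside
             * the kernel module, so the TCG translation loop below is
             * never reached; kvm_cpu_exec() returns when an exit needs
             * user-space handling, and the longjmp() above restarts the
             * outer setjmp() iteration to process it.
             */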
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
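                    /*
                     * Editorial note: the per-target blocks below turn
                     * pending interrupt_request bits into target
                     * exceptions. For TARGET_I386, a hardware interrupt
                     * is taken only when the (SVM) global interrupt flag
                     * GIF is set, and either virtual-interrupt delivery
                     * is enabled (HF2_VINTR/HF2_HIF) or EFLAGS.IF is set
                     * with no interrupt shadow (HF_INHIBIT_IRQ) active.
                     */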
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
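
                /*
                 * Editorial note: tb_add_jump() patches the direct-jump
                 * slot (next_tb & 3) of the previously executed TB so it
                 * branches straight into this TB's generated code,
                 * bypassing tb_find_fast() on the next execution.
                 * Chaining is skipped when the TB crosses a page boundary
                 * (page_addr[1] != -1), since either page may be
                 * invalidated independently.
                 */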
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
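                    /*
                     * Editorial note (icount mode, inferred from the
                     * field names): generated code decrements only the
                     * 16-bit env->icount_decr.u16.low, so the remaining
                     * instruction budget is parked in env->icount_extra
                     * and fed to the decrementer in chunks of at most
                     * 0xffff. When the decrementer underflows mid-budget
                     * the block below tops it up and resumes; when the
                     * whole budget is spent it executes the leftover
                     * instructions uncached and exits with EXCP_INTERRUPT.
                     */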
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
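
/*
 * Editorial note: hostregs_helper.h is deliberately included three times
 * in cpu_exec() above. With DECLARE_HOST_REGS defined it declares the
 * save slots, with SAVE_HOST_REGS it saves the host registers that are
 * pinned for 'env' and friends, and the bare include at the end restores
 * them before returning.
 */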

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
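
/*
 * Editorial note on the real-mode branch above: outside protected mode
 * (or in VM86 mode) a segment register directly encodes its base as
 * selector * 16 with a fixed 64 KiB limit, e.g. selector 0x1234 gives
 * base 0x12340, which is exactly what cpu_x86_load_seg_cache() is handed.
 */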

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
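
/*
 * Editorial note: every handle_cpu_signal() variant in this file follows
 * the same contract, assuming the host-fault path described above:
 * return 0 when the fault does not belong to the guest (the caller
 * re-raises the signal for the application), return 1 when it was
 * consumed, and never actually return when a guest exception must be
 * delivered, because raise_exception_err()/cpu_loop_exit() longjmp back
 * into cpu_exec().
 */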

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
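
/*
 * Editorial note: on x86 hosts, trap number 0xe is a page fault, and bit
 * 1 of the page-fault error code distinguishes writes from reads, hence
 * the (ERROR_sig(uc) >> 1) & 1 above; for any other trap number is_write
 * is conservatively passed as 0.
 */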

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* Fixed-point exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* Fixed-point exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */