/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
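
/* In TCG builds 'env' is normally a global register variable (AREG0,
   see exec.h); the alias above routes accesses through cpu_single_env
   instead, so that a register clobbered by glibc cannot corrupt the
   emulated CPU state. */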
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
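
/* Note on the value returned by tcg_qemu_tb_exec(): it is the pointer
   of the TB that ran last, with status encoded in the low two bits.
   Values 0 and 1 name the jump slot to patch when chaining TBs, and 2
   means the instruction counter expired in the middle of the block
   (see the icount handling in cpu_exec() below). */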
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
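
/* TB lookup is two-level: tb_find_fast() below probes a small
   direct-mapped cache indexed by the virtual PC (env->tb_jmp_cache);
   on a miss, tb_find_slow() above walks the hash chain keyed by the
   *physical* PC, so aliased mappings of the same code share one
   translation, and then refills the virtual-PC cache. */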
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
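
/* A sketch of how a client might chain its own hook through the setter
   above (the names below are hypothetical, not part of this file):

       static CPUDebugExcpHandler *prev_handler;

       static void my_debug_hook(CPUState *env)
       {
           // ... examine env->watchpoint_hit, log, etc. ...
           if (prev_handler)
               prev_handler(env);
       }

       prev_handler = cpu_set_debug_excp_handler(my_debug_hook);
*/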
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
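    /* The i386 front end keeps the condition codes in lazy form
       (CC_OP/CC_SRC) and the direction flag as +1/-1 in DF, so the
       architectural eflags is split apart here and recombined with
       helper_cc_compute_all() before cpu_exec() returns. */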
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
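
            /* With KVM the guest runs inside the kernel module rather
               than through TCG; kvm_cpu_exec() returns once an exit
               event has been handled, and we longjmp back to the top
               of the loop instead of falling through to translation. */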
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
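
                /* Chaining: the tb_add_jump() call above patches jump
                   slot (next_tb & 3) of the previously executed TB so
                   it branches straight to 'tb', bypassing this lookup
                   on subsequent passes; the link is undone again when
                   either TB is invalidated. */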
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
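
                /* Worked example of the refill above: with
                   insns_left == 0 and icount_extra == 0x12345, the
                   budget becomes 0x12345; since that exceeds the
                   16-bit decrementer, the next slice runs 0xffff
                   instructions and 0x2346 remain in icount_extra. */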
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
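
/* Sketch of a call site in a user-mode emulator materializing a guest
   context (names hypothetical, not part of this file):

       cpu_x86_load_seg(cpu_env, R_CS, sc_cs);
       cpu_x86_load_seg(cpu_env, R_SS, sc_ss);

   In real mode or vm86 the selector directly determines the base
   (selector << 4); in protected mode helper_load_seg() performs a full
   descriptor load and can raise an exception. */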
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
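
/* All handle_cpu_signal() variants follow the same protocol: return 0
   when the fault is not guest-related (the caller then lets the host
   deal with it), return 1 once the fault has been handled, and never
   return at all when a guest exception must be raised, since
   cpu_loop_exit()/cpu_resume_from_signal() longjmp back into
   cpu_exec(). */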
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
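
/* A SIGSEGV raised by guest code is decoded from the host trap frame:
   trap number 0xe is the x86 page-fault vector, and bit 1 of the
   page-fault error code is set when the faulting access was a write. */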
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context) /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context) /* Count register */
# define XER_sig(context)              REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)
# define XER_sig(context)              REG_sig(xer, context) /* User's integer exception register */
# define LR_sig(context)               REG_sig(lr, context) /* Link register */
# define CR_sig(context)               REG_sig(cr, context) /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif
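
    /* No write flag arrives with the signal, so the faulting
       instruction is decoded by hand below: op (bits 31-30) == 3
       selects the load/store format, and op3 (bits 24-19) identifies
       the store opcodes. */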
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */