[qemu-kvm/fedora.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
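
/* Set while code is being generated if existing TBs may have been
   invalidated (e.g. after a translation-cache flush); cpu_exec()
   tests it before chaining blocks so that it never patches a jump
   into a TB that has been freed (see the next_tb reset below). */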
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the register at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
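
/* Slow-path TB lookup: blocks are hashed on their physical PC, so a
   translation is found again even when the same physical page is
   mapped at several virtual addresses; translates on a miss. */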
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
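
/* Fast-path TB lookup: a direct-mapped cache indexed by the current
   virtual PC; falls back to tb_find_slow() on a miss or when cs_base
   or the TB flags do not match. */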
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        for (wp = env->watchpoints; wp != NULL; wp = wp->next)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */
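/* Structure: the outer for(;;)/setjmp pair catches cpu_loop_exit()
   and raised exceptions; the inner loop services pending interrupts,
   finds the next TB, chains it to the previous one when safe, and
   executes it until an exit request unwinds back here. */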

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
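    /* hostregs_helper.h is included three times in this function: with
       DECLARE_HOST_REGS to declare save slots, with SAVE_HOST_REGS to
       fill them, and once more near the end to restore the saved host
       registers. */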
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
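
    /* On i386 the arithmetic flags are handled lazily: CC_OP records
       how to recompute them from CC_SRC, and helper_cc_compute_all()
       folds them back into eflags when the full value is needed (see
       the function epilogue). */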
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            /* kvm vcpu threads */
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
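
            /* next_tb carries the address of the previously executed
               TB with the index of the jump slot taken encoded in its
               two low bits, so tb_add_jump() can chain the next block
               to it; a low-bits value of 2 instead flags an icount
               expiry (handled further down). */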
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
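
/* Everything below is used only without a soft MMU, i.e. for
   user-mode emulation: guest memory is accessed natively, so a guest
   fault arrives as a host SIGSEGV/SIGBUS and the handle_cpu_signal()
   variants turn it back into a guest MMU fault or a page unprotect. */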

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env, env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
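
/* Host-side entry points: one cpu_signal_handler() per host
   architecture.  Each extracts the faulting PC (and, where the host
   signal context makes it cheap, a read/write flag) from the signal
   context and defers to handle_cpu_signal() above. */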
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)  ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)  ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */