/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
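
/* cpu_loop_exit() never returns: it longjmp()s back to the setjmp() in
   cpu_exec(), which is how every path below abandons the currently
   executing translation block. */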
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
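
/* TB lookup, slow path: hash on the physical PC and walk the collision
   chain, matching pc/cs_base/flags (and the second physical page for TBs
   that span a page boundary) before falling back to translating new
   code with tb_gen_code(). */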
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
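
/* TB lookup, fast path: a direct-mapped cache indexed by the virtual PC.
   On a miss, or when the cached TB was generated for different CPU state,
   we fall back to tb_find_slow(). */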
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
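
/* cpu_exec() is the heart of the emulator: it saves the host globals,
   runs a setjmp()-protected outer loop that delivers pending exceptions
   and interrupts, then repeatedly looks up and executes translated blocks
   until something requests an exit. */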
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            /* kvm vcpu threads */
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
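
            /* The low two bits of next_tb encode how the previous block
               exited: 0/1 select the jump slot to patch for direct
               chaining, 2 means the instruction counter expired; the
               remaining bits hold the TranslationBlock pointer. */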
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
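                    /* On x86 the checks below follow hardware priority:
                       SMI first, then NMI, then maskable (and SVM virtual)
                       interrupts, all gated by the SVM global interrupt
                       flag (GIF). */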
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
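                /* tb_lock serializes TB lookup and the jump patching below
                   against other writers of the translation cache. */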
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
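
                /* Because chained TBs jump directly to each other, a single
                   tcg_qemu_tb_exec() call below may run many blocks before
                   control returns here. */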
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
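
/* User-mode-only fault handling: each target's handle_cpu_signal() runs
   from the host SIGSEGV/SIGBUS handler.  It first tries to lift the write
   protection used for self-modifying-code detection (page_unprotect),
   then asks the target MMU code to resolve the fault, and only raises a
   guest exception when a real CPU fault remains. */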
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
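
/* Host-side handlers: cpu_signal_handler() is installed as the process
   signal handler.  For each host architecture it digs the faulting PC
   (and, where the kernel provides one, a write flag) out of the signal
   context and forwards them to handle_cpu_signal() above. */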

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define REG_ERR    _REG_ERR
#define REG_TRAPNO _REG_TRAPNO

#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.__gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          _UC_MACHINE_PC(uc)
#else
#define QEMU_UC_MCONTEXT_GREGS(uc, reg) (uc)->uc_mcontext.gregs[(reg)]
#define QEMU_UC_MACHINE_PC(uc)          QEMU_UC_MCONTEXT_GREGS(uc, REG_RIP)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = QEMU_UC_MACHINE_PC(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             QEMU_UC_MCONTEXT_GREGS(uc, REG_TRAPNO) == 0xe ?
                             (QEMU_UC_MCONTEXT_GREGS(uc, REG_ERR) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
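
/* The Alpha and SPARC hosts get no write flag from the kernel, so the
   handlers below decode the instruction at the faulting PC and treat
   any store opcode as a write access. */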

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */