/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
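/* Note on tcg_qemu_tb_exec()'s return value: the upper bits point at the
   TranslationBlock that was executing last, and the low two bits are a
   tag.  Tags 0 and 1 name the jump slot of that TB which may be patched
   to chain directly to its successor (see tb_add_jump() in cpu_exec()
   below); tag 2 means execution stopped early for an asynchronous reason,
   e.g. an expired instruction counter, so the PC has to be restored from
   the TB as done above. */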
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
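/* TB lookup is thus two-level: tb_find_fast() probes tb_jmp_cache, a
   small direct-mapped cache indexed by a hash of the virtual PC, and
   falls back to tb_find_slow(), which walks the physically indexed hash
   chain (so cached blocks survive changes of the virtual mapping) and
   only translates a new block when no match exists. */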
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
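    /* hostregs_helper.h is included three times in this function: here
       with DECLARE_HOST_REGS to declare variables for the saved host
       registers, below with SAVE_HOST_REGS to store the host's global
       registers into them, and once more at the end (with no macro set)
       to restore them before returning. */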
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
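            /* With KVM enabled the guest executes inside the hypervisor:
               kvm_cpu_exec() returns only when an exit must be serviced,
               and the longjmp() restarts the outer loop so pending
               exceptions and exit requests are re-examined.  The TCG
               translation path below is not reached in that case. */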
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
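                /* Chaining: tb_add_jump() patches jump slot (next_tb & 3)
                   of the previously executed TB so that its generated code
                   branches straight into the new TB, letting later
                   executions bypass this lookup entirely.  next_tb is reset
                   to 0 wherever control flow changed in a way the patched
                   jump could not predict (interrupts, invalidated TBs). */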
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
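                /* icount bookkeeping: generated code counts instructions
                   against the 16-bit env->icount_decr.u16.low and exits
                   with tag 2 when it underflows, while env->icount_extra
                   holds the remainder of the budget.  The refill above
                   moves at most 0xffff instructions at a time from
                   icount_extra into the decrementer; once both are
                   exhausted, the last few instructions are run uncached
                   and the loop exits with EXCP_INTERRUPT. */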
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
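/* The user-mode-only helpers below share one pattern: the TCG helpers
   they call operate implicitly on the global 'env', so each wrapper
   saves the current env, points env at the caller's CPUX86State, calls
   the helper and then restores env. */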
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
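/* All handle_cpu_signal() variants here share this return protocol:
   0 means the fault did not come from the guest MMU (the caller must let
   the host deliver a real signal), 1 means it was handled here, either
   transparently or by raising a guest exception and longjmp-ing back
   into cpu_exec(). */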
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
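    /* Trap number 0xe is the x86 page fault; bit 1 of its error code is
       set when the faulting access was a write, hence the
       (ERROR_sig(uc) >> 1) & 1 below.  Other traps carry no write
       indication, so is_write is forced to 0. */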
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
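    /* On SPARC, instructions whose top two bits equal 3 are the
       load/store format; the op3 field in bits 24..19 then distinguishes
       stores from loads, which is how is_write is recovered here without
       kernel support. */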
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */