/*
 * Fix screendump with multiple consoles
 * [qemu-kvm/fedora.git] / cpu-exec.c
 * blob 68feb8bfc154fa47785e0276fcceee17fee3682b
 */
1 /*
2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #define CPU_NO_GLOBAL_REGS
22 #include "exec.h"
23 #include "disas.h"
24 #if !defined(TARGET_IA64)
25 #include "tcg.h"
26 #endif
28 #if !defined(CONFIG_SOFTMMU)
29 #undef EAX
30 #undef ECX
31 #undef EDX
32 #undef EBX
33 #undef ESP
34 #undef EBP
35 #undef ESI
36 #undef EDI
37 #undef EIP
38 #include <signal.h>
39 #include <sys/ucontext.h>
40 #endif
42 #include "qemu-kvm.h"
44 #if defined(__sparc__) && !defined(HOST_SOLARIS)
45 // Work around ugly bugs in glibc that mangle global register contents
46 #undef env
47 #define env cpu_single_env
48 #endif
50 int tb_invalidated_flag;
52 //#define DEBUG_EXEC
53 //#define DEBUG_SIGNAL
55 void cpu_loop_exit(void)
57 /* NOTE: the register at this point must be saved by hand because
58 longjmp restore them */
59 regs_to_env();
60 longjmp(env->jmp_env, 1);
63 #if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
64 #define reg_T2
65 #endif
67 /* exit the current TB from a signal handler. The host registers are
68 restored in a state compatible with the CPU emulator
70 void cpu_resume_from_signal(CPUState *env1, void *puc)
72 #if !defined(CONFIG_SOFTMMU)
73 struct ucontext *uc = puc;
74 #endif
76 env = env1;
78 /* XXX: restore cpu registers saved in host registers */
80 #if !defined(CONFIG_SOFTMMU)
81 if (puc) {
82 /* XXX: use siglongjmp ? */
83 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
85 #endif
86 longjmp(env->jmp_env, 1);
89 /* Execute the code without caching the generated code. An interpreter
90 could be used if available. */
91 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
93 unsigned long next_tb;
94 TranslationBlock *tb;
96 /* Should never happen.
97 We only end up here when an existing TB is too long. */
98 if (max_cycles > CF_COUNT_MASK)
99 max_cycles = CF_COUNT_MASK;
101 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
102 max_cycles);
103 env->current_tb = tb;
104 /* execute the generated code */
105 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
107 if ((next_tb & 3) == 2) {
108 /* Restore PC. This may happen if async event occurs before
109 the TB starts executing. */
110 CPU_PC_FROM_TB(env, tb);
112 tb_phys_invalidate(tb, -1);
113 tb_free(tb);
116 static TranslationBlock *tb_find_slow(target_ulong pc,
117 target_ulong cs_base,
118 uint64_t flags)
120 TranslationBlock *tb, **ptb1;
121 unsigned int h;
122 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
124 tb_invalidated_flag = 0;
126 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
128 /* find translated block using physical mappings */
129 phys_pc = get_phys_addr_code(env, pc);
130 phys_page1 = phys_pc & TARGET_PAGE_MASK;
131 phys_page2 = -1;
132 h = tb_phys_hash_func(phys_pc);
133 ptb1 = &tb_phys_hash[h];
134 for(;;) {
135 tb = *ptb1;
136 if (!tb)
137 goto not_found;
138 if (tb->pc == pc &&
139 tb->page_addr[0] == phys_page1 &&
140 tb->cs_base == cs_base &&
141 tb->flags == flags) {
142 /* check next page if needed */
143 if (tb->page_addr[1] != -1) {
144 virt_page2 = (pc & TARGET_PAGE_MASK) +
145 TARGET_PAGE_SIZE;
146 phys_page2 = get_phys_addr_code(env, virt_page2);
147 if (tb->page_addr[1] == phys_page2)
148 goto found;
149 } else {
150 goto found;
153 ptb1 = &tb->phys_hash_next;
155 not_found:
156 /* if no translated code available, then translate it now */
157 tb = tb_gen_code(env, pc, cs_base, flags, 0);
159 found:
160 /* we add the TB in the virtual pc hash table */
161 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
162 return tb;
165 static inline TranslationBlock *tb_find_fast(void)
167 TranslationBlock *tb;
168 target_ulong cs_base, pc;
169 uint64_t flags;
171 /* we record a subset of the CPU state. It will
172 always be the same before a given translated block
173 is executed. */
174 #if defined(TARGET_I386)
175 flags = env->hflags;
176 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
177 cs_base = env->segs[R_CS].base;
178 pc = cs_base + env->eip;
179 #elif defined(TARGET_ARM)
180 flags = env->thumb | (env->vfp.vec_len << 1)
181 | (env->vfp.vec_stride << 4);
182 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
183 flags |= (1 << 6);
184 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
185 flags |= (1 << 7);
186 flags |= (env->condexec_bits << 8);
187 cs_base = 0;
188 pc = env->regs[15];
189 #elif defined(TARGET_SPARC)
190 #ifdef TARGET_SPARC64
191 // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
192 flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
193 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
194 #else
195 // FPU enable . Supervisor
196 flags = (env->psref << 4) | env->psrs;
197 #endif
198 cs_base = env->npc;
199 pc = env->pc;
200 #elif defined(TARGET_PPC)
201 flags = env->hflags;
202 cs_base = 0;
203 pc = env->nip;
204 #elif defined(TARGET_MIPS)
205 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
206 cs_base = 0;
207 pc = env->active_tc.PC;
208 #elif defined(TARGET_M68K)
209 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
210 | (env->sr & SR_S) /* Bit 13 */
211 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
212 cs_base = 0;
213 pc = env->pc;
214 #elif defined(TARGET_SH4)
215 flags = env->flags;
216 cs_base = 0;
217 pc = env->pc;
218 #elif defined(TARGET_ALPHA)
219 flags = env->ps;
220 cs_base = 0;
221 pc = env->pc;
222 #elif defined(TARGET_CRIS)
223 flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
224 flags |= env->dslot;
225 cs_base = 0;
226 pc = env->pc;
227 #elif defined(TARGET_IA64)
228 flags = 0;
229 cs_base = 0; /* XXXXX */
230 pc = 0;
231 #else
232 #error unsupported CPU
233 #endif
234 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
235 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
236 tb->flags != flags)) {
237 tb = tb_find_slow(pc, cs_base, flags);
239 return tb;
242 /* main execution loop */
/* Main CPU execution loop: makes env1 the current CPU, then repeatedly
   delivers pending exceptions/interrupts and executes translated blocks
   until an EXCP_* exit is requested; returns that exception index.
   NOTE(review): this extracted text has lost several closing-brace
   lines (visible as gaps in the embedded line numbering); the code is
   kept byte-identical here, with comments only added. */
244 int cpu_exec(CPUState *env1)
246 #define DECLARE_HOST_REGS 1
247 #include "hostregs_helper.h"
248 int ret, interrupt_request;
249 TranslationBlock *tb;
250 uint8_t *tc_ptr;
251 unsigned long next_tb;
253 if (cpu_halted(env1) == EXCP_HALTED)
254 return EXCP_HALTED;
256 cpu_single_env = env1;
258 /* first we save global registers */
259 #define SAVE_HOST_REGS 1
260 #include "hostregs_helper.h"
261 env = env1;
263 env_to_regs();
264 #if defined(TARGET_I386)
265 /* put eflags in CPU temporary format */
266 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
267 DF = 1 - (2 * ((env->eflags >> 10) & 1));
268 CC_OP = CC_OP_EFLAGS;
269 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
270 #elif defined(TARGET_SPARC)
271 #elif defined(TARGET_M68K)
272 env->cc_op = CC_OP_FLAGS;
273 env->cc_dest = env->sr & 0xf;
274 env->cc_x = (env->sr >> 4) & 1;
275 #elif defined(TARGET_ALPHA)
276 #elif defined(TARGET_ARM)
277 #elif defined(TARGET_PPC)
278 #elif defined(TARGET_MIPS)
279 #elif defined(TARGET_SH4)
280 #elif defined(TARGET_CRIS)
281 #elif defined(TARGET_IA64)
282 /* XXXXX */
283 #else
284 #error unsupported target CPU
285 #endif
286 env->exception_index = -1;
288 /* prepare setjmp context for exception handling */
/* Outer loop: each iteration re-arms setjmp; longjmp from anywhere
   below (cpu_loop_exit, KVM/kqemu paths) restarts it. */
289 for(;;) {
290 if (setjmp(env->jmp_env) == 0) {
291 env->current_tb = NULL;
292 /* if an exception is pending, we execute it here */
293 if (env->exception_index >= 0) {
294 if (env->exception_index >= EXCP_INTERRUPT) {
295 /* exit request from the cpu execution loop */
296 ret = env->exception_index;
297 break;
298 } else if (env->user_mode_only) {
299 /* if user mode only, we simulate a fake exception
300 which will be handled outside the cpu execution
301 loop */
302 #if defined(TARGET_I386)
303 do_interrupt_user(env->exception_index,
304 env->exception_is_int,
305 env->error_code,
306 env->exception_next_eip);
307 /* successfully delivered */
308 env->old_exception = -1;
309 #endif
310 ret = env->exception_index;
311 break;
312 } else {
313 #if defined(TARGET_I386)
314 /* simulate a real cpu exception. On i386, it can
315 trigger new exceptions, but we do not handle
316 double or triple faults yet. */
317 do_interrupt(env->exception_index,
318 env->exception_is_int,
319 env->error_code,
320 env->exception_next_eip, 0);
321 /* successfully delivered */
322 env->old_exception = -1;
323 #elif defined(TARGET_PPC)
324 do_interrupt(env);
325 #elif defined(TARGET_MIPS)
326 do_interrupt(env);
327 #elif defined(TARGET_SPARC)
328 do_interrupt(env);
329 #elif defined(TARGET_ARM)
330 do_interrupt(env);
331 #elif defined(TARGET_SH4)
332 do_interrupt(env);
333 #elif defined(TARGET_ALPHA)
334 do_interrupt(env);
335 #elif defined(TARGET_CRIS)
336 do_interrupt(env);
337 #elif defined(TARGET_M68K)
338 do_interrupt(0);
339 #elif defined(TARGET_IA64)
340 do_interrupt(env);
341 #endif
343 env->exception_index = -1;
345 #ifdef USE_KQEMU
346 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
347 int ret;
348 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
349 ret = kqemu_cpu_exec(env);
350 /* put eflags in CPU temporary format */
351 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
352 DF = 1 - (2 * ((env->eflags >> 10) & 1));
353 CC_OP = CC_OP_EFLAGS;
354 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
355 if (ret == 1) {
356 /* exception */
357 longjmp(env->jmp_env, 1);
358 } else if (ret == 2) {
359 /* softmmu execution needed */
360 } else {
361 if (env->interrupt_request != 0) {
362 /* hardware interrupt will be executed just after */
363 } else {
364 /* otherwise, we restart */
365 longjmp(env->jmp_env, 1);
369 #endif
/* Hand off to KVM, then restart the outer loop via longjmp; the
   TCG path below is not used in that case. */
371 if (kvm_enabled()) {
372 kvm_cpu_exec(env);
373 longjmp(env->jmp_env, 1);
375 next_tb = 0; /* force lookup of first TB */
/* Inner loop: service pending interrupts, then find and run TBs. */
376 for(;;) {
377 interrupt_request = env->interrupt_request;
378 if (unlikely(interrupt_request) &&
379 likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
380 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
381 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
382 env->exception_index = EXCP_DEBUG;
383 cpu_loop_exit();
385 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
386 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
387 if (interrupt_request & CPU_INTERRUPT_HALT) {
388 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
389 env->halted = 1;
390 env->exception_index = EXCP_HLT;
391 cpu_loop_exit();
393 #endif
394 #if defined(TARGET_I386)
395 if (env->hflags2 & HF2_GIF_MASK) {
396 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
397 !(env->hflags & HF_SMM_MASK)) {
398 svm_check_intercept(SVM_EXIT_SMI);
399 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
400 do_smm_enter();
401 next_tb = 0;
402 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
403 !(env->hflags2 & HF2_NMI_MASK)) {
404 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
405 env->hflags2 |= HF2_NMI_MASK;
406 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
407 next_tb = 0;
408 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
409 (((env->hflags2 & HF2_VINTR_MASK) &&
410 (env->hflags2 & HF2_HIF_MASK)) ||
411 (!(env->hflags2 & HF2_VINTR_MASK) &&
412 (env->eflags & IF_MASK &&
413 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
414 int intno;
415 svm_check_intercept(SVM_EXIT_INTR);
416 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
417 intno = cpu_get_pic_interrupt(env);
418 if (loglevel & CPU_LOG_TB_IN_ASM) {
419 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
421 do_interrupt(intno, 0, 0, 0, 1);
422 /* ensure that no TB jump will be modified as
423 the program flow was changed */
424 next_tb = 0;
425 #if !defined(CONFIG_USER_ONLY)
426 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
427 (env->eflags & IF_MASK) &&
428 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
429 int intno;
430 /* FIXME: this should respect TPR */
431 svm_check_intercept(SVM_EXIT_VINTR);
432 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
433 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
434 if (loglevel & CPU_LOG_TB_IN_ASM)
435 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
436 do_interrupt(intno, 0, 0, 0, 1);
437 next_tb = 0;
438 #endif
441 #elif defined(TARGET_PPC)
442 #if 0
443 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
444 cpu_ppc_reset(env);
446 #endif
447 if (interrupt_request & CPU_INTERRUPT_HARD) {
448 ppc_hw_interrupt(env);
449 if (env->pending_interrupts == 0)
450 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
451 next_tb = 0;
453 #elif defined(TARGET_MIPS)
454 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
455 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
456 (env->CP0_Status & (1 << CP0St_IE)) &&
457 !(env->CP0_Status & (1 << CP0St_EXL)) &&
458 !(env->CP0_Status & (1 << CP0St_ERL)) &&
459 !(env->hflags & MIPS_HFLAG_DM)) {
460 /* Raise it */
461 env->exception_index = EXCP_EXT_INTERRUPT;
462 env->error_code = 0;
463 do_interrupt(env);
464 next_tb = 0;
466 #elif defined(TARGET_SPARC)
467 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
468 (env->psret != 0)) {
469 int pil = env->interrupt_index & 15;
470 int type = env->interrupt_index & 0xf0;
472 if (((type == TT_EXTINT) &&
473 (pil == 15 || pil > env->psrpil)) ||
474 type != TT_EXTINT) {
475 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
476 env->exception_index = env->interrupt_index;
477 do_interrupt(env);
478 env->interrupt_index = 0;
479 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
480 cpu_check_irqs(env);
481 #endif
482 next_tb = 0;
484 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
485 //do_interrupt(0, 0, 0, 0, 0);
486 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
488 #elif defined(TARGET_ARM)
489 if (interrupt_request & CPU_INTERRUPT_FIQ
490 && !(env->uncached_cpsr & CPSR_F)) {
491 env->exception_index = EXCP_FIQ;
492 do_interrupt(env);
493 next_tb = 0;
495 /* ARMv7-M interrupt return works by loading a magic value
496 into the PC. On real hardware the load causes the
497 return to occur. The qemu implementation performs the
498 jump normally, then does the exception return when the
499 CPU tries to execute code at the magic address.
500 This will cause the magic PC value to be pushed to
501 the stack if an interrupt occured at the wrong time.
502 We avoid this by disabling interrupts when
503 pc contains a magic address. */
504 if (interrupt_request & CPU_INTERRUPT_HARD
505 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
506 || !(env->uncached_cpsr & CPSR_I))) {
507 env->exception_index = EXCP_IRQ;
508 do_interrupt(env);
509 next_tb = 0;
511 #elif defined(TARGET_SH4)
512 if (interrupt_request & CPU_INTERRUPT_HARD) {
513 do_interrupt(env);
514 next_tb = 0;
516 #elif defined(TARGET_ALPHA)
517 if (interrupt_request & CPU_INTERRUPT_HARD) {
518 do_interrupt(env);
519 next_tb = 0;
521 #elif defined(TARGET_CRIS)
522 if (interrupt_request & CPU_INTERRUPT_HARD
523 && (env->pregs[PR_CCS] & I_FLAG)) {
524 env->exception_index = EXCP_IRQ;
525 do_interrupt(env);
526 next_tb = 0;
528 if (interrupt_request & CPU_INTERRUPT_NMI
529 && (env->pregs[PR_CCS] & M_FLAG)) {
530 env->exception_index = EXCP_NMI;
531 do_interrupt(env);
532 next_tb = 0;
534 #elif defined(TARGET_M68K)
535 if (interrupt_request & CPU_INTERRUPT_HARD
536 && ((env->sr & SR_I) >> SR_I_SHIFT)
537 < env->pending_level) {
538 /* Real hardware gets the interrupt vector via an
539 IACK cycle at this point. Current emulated
540 hardware doesn't rely on this, so we
541 provide/save the vector when the interrupt is
542 first signalled. */
543 env->exception_index = env->pending_vector;
544 do_interrupt(1);
545 next_tb = 0;
547 #endif
548 /* Don't use the cached interupt_request value,
549 do_interrupt may have updated the EXITTB flag. */
550 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
551 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
552 /* ensure that no TB jump will be modified as
553 the program flow was changed */
554 next_tb = 0;
556 if (interrupt_request & CPU_INTERRUPT_EXIT) {
557 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
558 env->exception_index = EXCP_INTERRUPT;
559 cpu_loop_exit();
562 #ifdef DEBUG_EXEC
563 if ((loglevel & CPU_LOG_TB_CPU)) {
564 /* restore flags in standard format */
565 regs_to_env();
566 #if defined(TARGET_I386)
567 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
568 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
569 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
570 #elif defined(TARGET_ARM)
571 cpu_dump_state(env, logfile, fprintf, 0);
572 #elif defined(TARGET_SPARC)
573 cpu_dump_state(env, logfile, fprintf, 0);
574 #elif defined(TARGET_PPC)
575 cpu_dump_state(env, logfile, fprintf, 0);
576 #elif defined(TARGET_M68K)
577 cpu_m68k_flush_flags(env, env->cc_op);
578 env->cc_op = CC_OP_FLAGS;
579 env->sr = (env->sr & 0xffe0)
580 | env->cc_dest | (env->cc_x << 4);
581 cpu_dump_state(env, logfile, fprintf, 0);
582 #elif defined(TARGET_MIPS)
583 cpu_dump_state(env, logfile, fprintf, 0);
584 #elif defined(TARGET_SH4)
585 cpu_dump_state(env, logfile, fprintf, 0);
586 #elif defined(TARGET_ALPHA)
587 cpu_dump_state(env, logfile, fprintf, 0);
588 #elif defined(TARGET_CRIS)
589 cpu_dump_state(env, logfile, fprintf, 0);
590 #else
591 #error unsupported target CPU
592 #endif
594 #endif
595 spin_lock(&tb_lock);
596 tb = tb_find_fast();
597 /* Note: we do it here to avoid a gcc bug on Mac OS X when
598 doing it in tb_find_slow */
599 if (tb_invalidated_flag) {
600 /* as some TB could have been invalidated because
601 of memory exceptions while generating the code, we
602 must recompute the hash index here */
603 next_tb = 0;
604 tb_invalidated_flag = 0;
606 #ifdef DEBUG_EXEC
607 if ((loglevel & CPU_LOG_EXEC)) {
608 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
609 (long)tb->tc_ptr, tb->pc,
610 lookup_symbol(tb->pc));
612 #endif
613 /* see if we can patch the calling TB. When the TB
614 spans two pages, we cannot safely do a direct
615 jump. */
617 if (next_tb != 0 &&
618 #ifdef USE_KQEMU
619 (env->kqemu_enabled != 2) &&
620 #endif
621 tb->page_addr[1] == -1) {
622 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
625 spin_unlock(&tb_lock);
626 env->current_tb = tb;
/* Execute the chosen TB; (next_tb & 3) == 2 marks icount expiry,
   handled below by refilling the decrementer or exiting. */
627 while (env->current_tb) {
628 tc_ptr = tb->tc_ptr;
629 /* execute the generated code */
630 #if defined(__sparc__) && !defined(HOST_SOLARIS)
631 #undef env
632 env = cpu_single_env;
633 #define env cpu_single_env
634 #endif
635 next_tb = tcg_qemu_tb_exec(tc_ptr);
636 env->current_tb = NULL;
637 if ((next_tb & 3) == 2) {
638 /* Instruction counter expired. */
639 int insns_left;
640 tb = (TranslationBlock *)(long)(next_tb & ~3);
641 /* Restore PC. */
642 CPU_PC_FROM_TB(env, tb);
643 insns_left = env->icount_decr.u32;
644 if (env->icount_extra && insns_left >= 0) {
645 /* Refill decrementer and continue execution. */
646 env->icount_extra += insns_left;
647 if (env->icount_extra > 0xffff) {
648 insns_left = 0xffff;
649 } else {
650 insns_left = env->icount_extra;
652 env->icount_extra -= insns_left;
653 env->icount_decr.u16.low = insns_left;
654 } else {
655 if (insns_left > 0) {
656 /* Execute remaining instructions. */
657 cpu_exec_nocache(insns_left, tb);
659 env->exception_index = EXCP_INTERRUPT;
660 next_tb = 0;
661 cpu_loop_exit();
665 /* reset soft MMU for next block (it can currently
666 only be set by a memory fault) */
667 #if defined(USE_KQEMU)
668 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
669 if (kqemu_is_ok(env) &&
670 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
671 cpu_loop_exit();
673 #endif
674 } /* for(;;) */
675 } else {
676 env_to_regs();
678 } /* for(;;) */
681 #if defined(TARGET_I386)
682 /* restore flags in standard format */
683 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
684 #elif defined(TARGET_ARM)
685 /* XXX: Save/restore host fpu exception state?. */
686 #elif defined(TARGET_SPARC)
687 #elif defined(TARGET_PPC)
688 #elif defined(TARGET_M68K)
689 cpu_m68k_flush_flags(env, env->cc_op);
690 env->cc_op = CC_OP_FLAGS;
691 env->sr = (env->sr & 0xffe0)
692 | env->cc_dest | (env->cc_x << 4);
693 #elif defined(TARGET_MIPS)
694 #elif defined(TARGET_SH4)
695 #elif defined(TARGET_IA64)
696 #elif defined(TARGET_ALPHA)
697 #elif defined(TARGET_CRIS)
698 /* XXXXX */
699 #else
700 #error unsupported target CPU
701 #endif
703 /* restore global registers */
704 #include "hostregs_helper.h"
706 /* fail safe : never use cpu_single_env outside cpu_exec() */
707 cpu_single_env = NULL;
708 return ret;
711 /* must only be called from the generated code as an exception can be
712 generated */
713 void tb_invalidate_page_range(target_ulong start, target_ulong end)
715 /* XXX: cannot enable it yet because it yields to MMU exception
716 where NIP != read address on PowerPC */
717 #if 0
718 target_ulong phys_addr;
719 phys_addr = get_phys_addr_code(env, start);
720 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
721 #endif
724 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
726 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
728 CPUX86State *saved_env;
730 saved_env = env;
731 env = s;
732 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
733 selector &= 0xffff;
734 cpu_x86_load_seg_cache(env, seg_reg, selector,
735 (selector << 4), 0xffff, 0);
736 } else {
737 helper_load_seg(seg_reg, selector);
739 env = saved_env;
742 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
744 CPUX86State *saved_env;
746 saved_env = env;
747 env = s;
749 helper_fsave(ptr, data32);
751 env = saved_env;
754 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
756 CPUX86State *saved_env;
758 saved_env = env;
759 env = s;
761 helper_frstor(ptr, data32);
763 env = saved_env;
766 #endif /* TARGET_I386 */
768 #if !defined(CONFIG_SOFTMMU)
770 #if defined(TARGET_I386)
772 /* 'pc' is the host PC at which the exception was raised. 'address' is
773 the effective address of the memory exception. 'is_write' is 1 if a
774 write caused the exception and otherwise 0'. 'old_set' is the
775 signal set which should be restored */
776 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
777 int is_write, sigset_t *old_set,
778 void *puc)
780 TranslationBlock *tb;
781 int ret;
783 if (cpu_single_env)
784 env = cpu_single_env; /* XXX: find a correct solution for multithread */
785 #if defined(DEBUG_SIGNAL)
786 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
787 pc, address, is_write, *(unsigned long *)old_set);
788 #endif
789 /* XXX: locking issue */
790 if (is_write && page_unprotect(h2g(address), pc, puc)) {
791 return 1;
794 /* see if it is an MMU fault */
795 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
796 if (ret < 0)
797 return 0; /* not an MMU fault */
798 if (ret == 0)
799 return 1; /* the MMU fault was handled without causing real CPU fault */
800 /* now we have a real cpu fault */
801 tb = tb_find_pc(pc);
802 if (tb) {
803 /* the PC is inside the translated code. It means that we have
804 a virtual CPU fault */
805 cpu_restore_state(tb, env, pc, puc);
807 if (ret == 1) {
808 #if 0
809 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
810 env->eip, env->cr[2], env->error_code);
811 #endif
812 /* we restore the process signal mask as the sigreturn should
813 do it (XXX: use sigsetjmp) */
814 sigprocmask(SIG_SETMASK, old_set, NULL);
815 raise_exception_err(env->exception_index, env->error_code);
816 } else {
817 /* activate soft MMU for this block */
818 env->hflags |= HF_SOFTMMU_MASK;
819 cpu_resume_from_signal(env, puc);
821 /* never comes here */
822 return 1;
825 #elif defined(TARGET_ARM)
826 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
827 int is_write, sigset_t *old_set,
828 void *puc)
830 TranslationBlock *tb;
831 int ret;
833 if (cpu_single_env)
834 env = cpu_single_env; /* XXX: find a correct solution for multithread */
835 #if defined(DEBUG_SIGNAL)
836 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
837 pc, address, is_write, *(unsigned long *)old_set);
838 #endif
839 /* XXX: locking issue */
840 if (is_write && page_unprotect(h2g(address), pc, puc)) {
841 return 1;
843 /* see if it is an MMU fault */
844 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
845 if (ret < 0)
846 return 0; /* not an MMU fault */
847 if (ret == 0)
848 return 1; /* the MMU fault was handled without causing real CPU fault */
849 /* now we have a real cpu fault */
850 tb = tb_find_pc(pc);
851 if (tb) {
852 /* the PC is inside the translated code. It means that we have
853 a virtual CPU fault */
854 cpu_restore_state(tb, env, pc, puc);
856 /* we restore the process signal mask as the sigreturn should
857 do it (XXX: use sigsetjmp) */
858 sigprocmask(SIG_SETMASK, old_set, NULL);
859 cpu_loop_exit();
860 /* never comes here */
861 return 1;
863 #elif defined(TARGET_SPARC)
864 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
865 int is_write, sigset_t *old_set,
866 void *puc)
868 TranslationBlock *tb;
869 int ret;
871 if (cpu_single_env)
872 env = cpu_single_env; /* XXX: find a correct solution for multithread */
873 #if defined(DEBUG_SIGNAL)
874 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
875 pc, address, is_write, *(unsigned long *)old_set);
876 #endif
877 /* XXX: locking issue */
878 if (is_write && page_unprotect(h2g(address), pc, puc)) {
879 return 1;
881 /* see if it is an MMU fault */
882 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
883 if (ret < 0)
884 return 0; /* not an MMU fault */
885 if (ret == 0)
886 return 1; /* the MMU fault was handled without causing real CPU fault */
887 /* now we have a real cpu fault */
888 tb = tb_find_pc(pc);
889 if (tb) {
890 /* the PC is inside the translated code. It means that we have
891 a virtual CPU fault */
892 cpu_restore_state(tb, env, pc, puc);
894 /* we restore the process signal mask as the sigreturn should
895 do it (XXX: use sigsetjmp) */
896 sigprocmask(SIG_SETMASK, old_set, NULL);
897 cpu_loop_exit();
898 /* never comes here */
899 return 1;
901 #elif defined (TARGET_PPC)
902 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
903 int is_write, sigset_t *old_set,
904 void *puc)
906 TranslationBlock *tb;
907 int ret;
909 if (cpu_single_env)
910 env = cpu_single_env; /* XXX: find a correct solution for multithread */
911 #if defined(DEBUG_SIGNAL)
912 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
913 pc, address, is_write, *(unsigned long *)old_set);
914 #endif
915 /* XXX: locking issue */
916 if (is_write && page_unprotect(h2g(address), pc, puc)) {
917 return 1;
920 /* see if it is an MMU fault */
921 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
922 if (ret < 0)
923 return 0; /* not an MMU fault */
924 if (ret == 0)
925 return 1; /* the MMU fault was handled without causing real CPU fault */
927 /* now we have a real cpu fault */
928 tb = tb_find_pc(pc);
929 if (tb) {
930 /* the PC is inside the translated code. It means that we have
931 a virtual CPU fault */
932 cpu_restore_state(tb, env, pc, puc);
934 if (ret == 1) {
935 #if 0
936 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
937 env->nip, env->error_code, tb);
938 #endif
939 /* we restore the process signal mask as the sigreturn should
940 do it (XXX: use sigsetjmp) */
941 sigprocmask(SIG_SETMASK, old_set, NULL);
942 do_raise_exception_err(env->exception_index, env->error_code);
943 } else {
944 /* activate soft MMU for this block */
945 cpu_resume_from_signal(env, puc);
947 /* never comes here */
948 return 1;
951 #elif defined(TARGET_M68K)
952 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
953 int is_write, sigset_t *old_set,
954 void *puc)
956 TranslationBlock *tb;
957 int ret;
959 if (cpu_single_env)
960 env = cpu_single_env; /* XXX: find a correct solution for multithread */
961 #if defined(DEBUG_SIGNAL)
962 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
963 pc, address, is_write, *(unsigned long *)old_set);
964 #endif
965 /* XXX: locking issue */
966 if (is_write && page_unprotect(address, pc, puc)) {
967 return 1;
969 /* see if it is an MMU fault */
970 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
971 if (ret < 0)
972 return 0; /* not an MMU fault */
973 if (ret == 0)
974 return 1; /* the MMU fault was handled without causing real CPU fault */
975 /* now we have a real cpu fault */
976 tb = tb_find_pc(pc);
977 if (tb) {
978 /* the PC is inside the translated code. It means that we have
979 a virtual CPU fault */
980 cpu_restore_state(tb, env, pc, puc);
982 /* we restore the process signal mask as the sigreturn should
983 do it (XXX: use sigsetjmp) */
984 sigprocmask(SIG_SETMASK, old_set, NULL);
985 cpu_loop_exit();
986 /* never comes here */
987 return 1;
990 #elif defined (TARGET_MIPS)
991 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
992 int is_write, sigset_t *old_set,
993 void *puc)
995 TranslationBlock *tb;
996 int ret;
998 if (cpu_single_env)
999 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1000 #if defined(DEBUG_SIGNAL)
1001 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1002 pc, address, is_write, *(unsigned long *)old_set);
1003 #endif
1004 /* XXX: locking issue */
1005 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1006 return 1;
1009 /* see if it is an MMU fault */
1010 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1011 if (ret < 0)
1012 return 0; /* not an MMU fault */
1013 if (ret == 0)
1014 return 1; /* the MMU fault was handled without causing real CPU fault */
1016 /* now we have a real cpu fault */
1017 tb = tb_find_pc(pc);
1018 if (tb) {
1019 /* the PC is inside the translated code. It means that we have
1020 a virtual CPU fault */
1021 cpu_restore_state(tb, env, pc, puc);
1023 if (ret == 1) {
1024 #if 0
1025 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1026 env->PC, env->error_code, tb);
1027 #endif
1028 /* we restore the process signal mask as the sigreturn should
1029 do it (XXX: use sigsetjmp) */
1030 sigprocmask(SIG_SETMASK, old_set, NULL);
1031 do_raise_exception_err(env->exception_index, env->error_code);
1032 } else {
1033 /* activate soft MMU for this block */
1034 cpu_resume_from_signal(env, puc);
1036 /* never comes here */
1037 return 1;
1040 #elif defined (TARGET_SH4)
1041 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1042 int is_write, sigset_t *old_set,
1043 void *puc)
1045 TranslationBlock *tb;
1046 int ret;
1048 if (cpu_single_env)
1049 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1050 #if defined(DEBUG_SIGNAL)
1051 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1052 pc, address, is_write, *(unsigned long *)old_set);
1053 #endif
1054 /* XXX: locking issue */
1055 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1056 return 1;
1059 /* see if it is an MMU fault */
1060 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1061 if (ret < 0)
1062 return 0; /* not an MMU fault */
1063 if (ret == 0)
1064 return 1; /* the MMU fault was handled without causing real CPU fault */
1066 /* now we have a real cpu fault */
1067 tb = tb_find_pc(pc);
1068 if (tb) {
1069 /* the PC is inside the translated code. It means that we have
1070 a virtual CPU fault */
1071 cpu_restore_state(tb, env, pc, puc);
1073 #if 0
1074 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1075 env->nip, env->error_code, tb);
1076 #endif
1077 /* we restore the process signal mask as the sigreturn should
1078 do it (XXX: use sigsetjmp) */
1079 sigprocmask(SIG_SETMASK, old_set, NULL);
1080 cpu_loop_exit();
1081 /* never comes here */
1082 return 1;
1085 #elif defined (TARGET_ALPHA)
1086 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1087 int is_write, sigset_t *old_set,
1088 void *puc)
1090 TranslationBlock *tb;
1091 int ret;
1093 if (cpu_single_env)
1094 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1095 #if defined(DEBUG_SIGNAL)
1096 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1097 pc, address, is_write, *(unsigned long *)old_set);
1098 #endif
1099 /* XXX: locking issue */
1100 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1101 return 1;
1104 /* see if it is an MMU fault */
1105 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1106 if (ret < 0)
1107 return 0; /* not an MMU fault */
1108 if (ret == 0)
1109 return 1; /* the MMU fault was handled without causing real CPU fault */
1111 /* now we have a real cpu fault */
1112 tb = tb_find_pc(pc);
1113 if (tb) {
1114 /* the PC is inside the translated code. It means that we have
1115 a virtual CPU fault */
1116 cpu_restore_state(tb, env, pc, puc);
1118 #if 0
1119 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1120 env->nip, env->error_code, tb);
1121 #endif
1122 /* we restore the process signal mask as the sigreturn should
1123 do it (XXX: use sigsetjmp) */
1124 sigprocmask(SIG_SETMASK, old_set, NULL);
1125 cpu_loop_exit();
1126 /* never comes here */
1127 return 1;
1129 #elif defined (TARGET_CRIS)
1130 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1131 int is_write, sigset_t *old_set,
1132 void *puc)
1134 TranslationBlock *tb;
1135 int ret;
1137 if (cpu_single_env)
1138 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1139 #if defined(DEBUG_SIGNAL)
1140 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1141 pc, address, is_write, *(unsigned long *)old_set);
1142 #endif
1143 /* XXX: locking issue */
1144 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1145 return 1;
1148 /* see if it is an MMU fault */
1149 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1150 if (ret < 0)
1151 return 0; /* not an MMU fault */
1152 if (ret == 0)
1153 return 1; /* the MMU fault was handled without causing real CPU fault */
1155 /* now we have a real cpu fault */
1156 tb = tb_find_pc(pc);
1157 if (tb) {
1158 /* the PC is inside the translated code. It means that we have
1159 a virtual CPU fault */
1160 cpu_restore_state(tb, env, pc, puc);
1162 /* we restore the process signal mask as the sigreturn should
1163 do it (XXX: use sigsetjmp) */
1164 sigprocmask(SIG_SETMASK, old_set, NULL);
1165 cpu_loop_exit();
1166 /* never comes here */
1167 return 1;
1170 #else
1171 #error unsupported target CPU
1172 #endif
1174 #if defined(__i386__)
1176 #if defined(__APPLE__)
1177 # include <sys/ucontext.h>
1179 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1180 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1181 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1182 #else
1183 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1184 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1185 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1186 #endif
1188 int cpu_signal_handler(int host_signum, void *pinfo,
1189 void *puc)
1191 siginfo_t *info = pinfo;
1192 struct ucontext *uc = puc;
1193 unsigned long pc;
1194 int trapno;
1196 #ifndef REG_EIP
1197 /* for glibc 2.1 */
1198 #define REG_EIP EIP
1199 #define REG_ERR ERR
1200 #define REG_TRAPNO TRAPNO
1201 #endif
1202 pc = EIP_sig(uc);
1203 trapno = TRAP_sig(uc);
1204 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1205 trapno == 0xe ?
1206 (ERROR_sig(uc) >> 1) & 1 : 0,
1207 &uc->uc_sigmask, puc);
1210 #elif defined(__x86_64__)
1212 int cpu_signal_handler(int host_signum, void *pinfo,
1213 void *puc)
1215 siginfo_t *info = pinfo;
1216 struct ucontext *uc = puc;
1217 unsigned long pc;
1219 pc = uc->uc_mcontext.gregs[REG_RIP];
1220 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1221 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1222 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1223 &uc->uc_sigmask, puc);
1226 #elif defined(__powerpc__)
1228 /***********************************************************************
1229 * signal context platform-specific definitions
1230 * From Wine
1232 #ifdef linux
1233 /* All Registers access - only for local access */
1234 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1235 /* Gpr Registers access */
1236 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1237 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1238 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1239 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1240 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1241 # define LR_sig(context) REG_sig(link, context) /* Link register */
1242 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1243 /* Float Registers access */
1244 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1245 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1246 /* Exception Registers access */
1247 # define DAR_sig(context) REG_sig(dar, context)
1248 # define DSISR_sig(context) REG_sig(dsisr, context)
1249 # define TRAP_sig(context) REG_sig(trap, context)
1250 #endif /* linux */
1252 #ifdef __APPLE__
1253 # include <sys/ucontext.h>
1254 typedef struct ucontext SIGCONTEXT;
1255 /* All Registers access - only for local access */
1256 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1257 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1258 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1259 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1260 /* Gpr Registers access */
1261 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1262 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1263 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1264 # define CTR_sig(context) REG_sig(ctr, context)
1265 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1266 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1267 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1268 /* Float Registers access */
1269 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1270 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1271 /* Exception Registers access */
1272 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1273 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1274 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1275 #endif /* __APPLE__ */
1277 int cpu_signal_handler(int host_signum, void *pinfo,
1278 void *puc)
1280 siginfo_t *info = pinfo;
1281 struct ucontext *uc = puc;
1282 unsigned long pc;
1283 int is_write;
1285 pc = IAR_sig(uc);
1286 is_write = 0;
1287 #if 0
1288 /* ppc 4xx case */
1289 if (DSISR_sig(uc) & 0x00800000)
1290 is_write = 1;
1291 #else
1292 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1293 is_write = 1;
1294 #endif
1295 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1296 is_write, &uc->uc_sigmask, puc);
1299 #elif defined(__alpha__)
1301 int cpu_signal_handler(int host_signum, void *pinfo,
1302 void *puc)
1304 siginfo_t *info = pinfo;
1305 struct ucontext *uc = puc;
1306 uint32_t *pc = uc->uc_mcontext.sc_pc;
1307 uint32_t insn = *pc;
1308 int is_write = 0;
1310 /* XXX: need kernel patch to get write flag faster */
1311 switch (insn >> 26) {
1312 case 0x0d: // stw
1313 case 0x0e: // stb
1314 case 0x0f: // stq_u
1315 case 0x24: // stf
1316 case 0x25: // stg
1317 case 0x26: // sts
1318 case 0x27: // stt
1319 case 0x2c: // stl
1320 case 0x2d: // stq
1321 case 0x2e: // stl_c
1322 case 0x2f: // stq_c
1323 is_write = 1;
1326 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1327 is_write, &uc->uc_sigmask, puc);
1329 #elif defined(__sparc__)
1331 int cpu_signal_handler(int host_signum, void *pinfo,
1332 void *puc)
1334 siginfo_t *info = pinfo;
1335 int is_write;
1336 uint32_t insn;
1337 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1338 uint32_t *regs = (uint32_t *)(info + 1);
1339 void *sigmask = (regs + 20);
1340 /* XXX: is there a standard glibc define ? */
1341 unsigned long pc = regs[1];
1342 #else
1343 struct sigcontext *sc = puc;
1344 unsigned long pc = sc->sigc_regs.tpc;
1345 void *sigmask = (void *)sc->sigc_mask;
1346 #endif
1348 /* XXX: need kernel patch to get write flag faster */
1349 is_write = 0;
1350 insn = *(uint32_t *)pc;
1351 if ((insn >> 30) == 3) {
1352 switch((insn >> 19) & 0x3f) {
1353 case 0x05: // stb
1354 case 0x06: // sth
1355 case 0x04: // st
1356 case 0x07: // std
1357 case 0x24: // stf
1358 case 0x27: // stdf
1359 case 0x25: // stfsr
1360 is_write = 1;
1361 break;
1364 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1365 is_write, sigmask, NULL);
1368 #elif defined(__arm__)
1370 int cpu_signal_handler(int host_signum, void *pinfo,
1371 void *puc)
1373 siginfo_t *info = pinfo;
1374 struct ucontext *uc = puc;
1375 unsigned long pc;
1376 int is_write;
1378 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1379 pc = uc->uc_mcontext.gregs[R15];
1380 #else
1381 pc = uc->uc_mcontext.arm_pc;
1382 #endif
1383 /* XXX: compute is_write */
1384 is_write = 0;
1385 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1386 is_write,
1387 &uc->uc_sigmask, puc);
1390 #elif defined(__mc68000)
1392 int cpu_signal_handler(int host_signum, void *pinfo,
1393 void *puc)
1395 siginfo_t *info = pinfo;
1396 struct ucontext *uc = puc;
1397 unsigned long pc;
1398 int is_write;
1400 pc = uc->uc_mcontext.gregs[16];
1401 /* XXX: compute is_write */
1402 is_write = 0;
1403 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1404 is_write,
1405 &uc->uc_sigmask, puc);
1408 #elif defined(__ia64)
1410 #ifndef __ISR_VALID
1411 /* This ought to be in <bits/siginfo.h>... */
1412 # define __ISR_VALID 1
1413 #endif
1415 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1417 siginfo_t *info = pinfo;
1418 struct ucontext *uc = puc;
1419 unsigned long ip;
1420 int is_write = 0;
1422 ip = uc->uc_mcontext.sc_ip;
1423 switch (host_signum) {
1424 case SIGILL:
1425 case SIGFPE:
1426 case SIGSEGV:
1427 case SIGBUS:
1428 case SIGTRAP:
1429 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1430 /* ISR.W (write-access) is bit 33: */
1431 is_write = (info->si_isr >> 33) & 1;
1432 break;
1434 default:
1435 break;
1437 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1438 is_write,
1439 &uc->uc_sigmask, puc);
1442 #elif defined(__s390__)
1444 int cpu_signal_handler(int host_signum, void *pinfo,
1445 void *puc)
1447 siginfo_t *info = pinfo;
1448 struct ucontext *uc = puc;
1449 unsigned long pc;
1450 int is_write;
1452 pc = uc->uc_mcontext.psw.addr;
1453 /* XXX: compute is_write */
1454 is_write = 0;
1455 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1456 is_write, &uc->uc_sigmask, puc);
1459 #elif defined(__mips__)
1461 int cpu_signal_handler(int host_signum, void *pinfo,
1462 void *puc)
1464 siginfo_t *info = pinfo;
1465 struct ucontext *uc = puc;
1466 greg_t pc = uc->uc_mcontext.pc;
1467 int is_write;
1469 /* XXX: compute is_write */
1470 is_write = 0;
1471 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1472 is_write, &uc->uc_sigmask, puc);
1475 #elif defined(__hppa__)
1477 int cpu_signal_handler(int host_signum, void *pinfo,
1478 void *puc)
1480 struct siginfo *info = pinfo;
1481 struct ucontext *uc = puc;
1482 unsigned long pc;
1483 int is_write;
1485 pc = uc->uc_mcontext.sc_iaoq[0];
1486 /* FIXME: compute is_write */
1487 is_write = 0;
1488 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1489 is_write,
1490 &uc->uc_sigmask, puc);
1493 #else
1495 #error host CPU specific signal handler needed
1497 #endif
1499 #endif /* !defined(CONFIG_SOFTMMU) */