/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
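
    /* The low 2 bits of next_tb encode why the generated code returned:
       0 or 1 name the jump slot taken (used for TB chaining), while 2
       flags an asynchronous exit, e.g. the instruction counter expired
       before the TB body ran. */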
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
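
/* tb_find_fast() below first consults tb_jmp_cache, a direct-mapped
   table hashed on the virtual PC alone; only on a miss, or a cs_base /
   flags mismatch, does it fall back to tb_find_slow() above, which
   searches by physical address and may translate a fresh block. */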
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
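
/* Sketch of the control flow below: an outer setjmp/longjmp loop fields
   exceptions, while the inner loop services interrupt_request flags,
   looks up (or translates) the next TB, optionally chains it to the
   previous one, and jumps into it via tcg_qemu_tb_exec(). */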
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
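
            /* next_tb remembers the TB we just came from (pointer plus
               jump-slot index in the low bits); forcing it to 0 disables
               chaining across this point and makes the next iteration do
               a full lookup. */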
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
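                /* next_tb & ~3 is the TB we just left and next_tb & 3 the
                   jump slot in it that fell through; patching that slot
                   (tb_add_jump below) chains the two TBs so the next pass
                   skips the lookup entirely. */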
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
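                    /* Low bits == 2 means the 16-bit icount decrementer
                       went negative mid-TB: either refill it from
                       icount_extra and keep going, or finish the leftover
                       instructions in a throwaway TB (cpu_exec_nocache)
                       and report EXCP_INTERRUPT. */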
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

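/* The user-mode helpers below temporarily swap the global env pointer
   to the caller-supplied CPUX86State, because the TCG helper functions
   they call operate on the implicit env. */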
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

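/* User-mode (no soft MMU) fault handling: one handle_cpu_signal()
   variant per target follows.  The shared return convention is 1 when
   the fault was handled and guest execution may resume, and 0 when it
   must be forwarded to the application's own signal handler. */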
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

958 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
959 int is_write, sigset_t *old_set,
960 void *puc)
962 TranslationBlock *tb;
963 int ret;
965 if (cpu_single_env)
966 env = cpu_single_env; /* XXX: find a correct solution for multithread */
967 #if defined(DEBUG_SIGNAL)
968 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
969 pc, address, is_write, *(unsigned long *)old_set);
970 #endif
971 /* XXX: locking issue */
972 if (is_write && page_unprotect(address, pc, puc)) {
973 return 1;
975 /* see if it is an MMU fault */
976 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
977 if (ret < 0)
978 return 0; /* not an MMU fault */
979 if (ret == 0)
980 return 1; /* the MMU fault was handled without causing real CPU fault */
981 /* now we have a real cpu fault */
982 tb = tb_find_pc(pc);
983 if (tb) {
984 /* the PC is inside the translated code. It means that we have
985 a virtual CPU fault */
986 cpu_restore_state(tb, env, pc, puc);
988 /* we restore the process signal mask as the sigreturn should
989 do it (XXX: use sigsetjmp) */
990 sigprocmask(SIG_SETMASK, old_set, NULL);
991 cpu_loop_exit();
992 /* never comes here */
993 return 1;
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

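/* Host side: each cpu_signal_handler() below extracts the faulting host
   PC and, where the host ABI exposes one, a write/read flag from the
   signal ucontext, then defers to handle_cpu_signal() above. */
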
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)    /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)    /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)    /* Count register */
# define XER_sig(context)              REG_sig(xer, context)    /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context)   /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)    /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
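    /* The Alpha sigcontext carries no write flag, so decode the faulting
       instruction instead: major opcodes 0x0d-0x0f, 0x24-0x27 and
       0x2c-0x2f are the integer, FP and conditional store families. */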
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
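    /* SPARC format-3 instructions (top two bits == 3) are the memory
       ops; op3 values 0x04-0x07, 0x24, 0x25 and 0x27 are stores. */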
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
      case SIGILL:
      case SIGFPE:
      case SIGSEGV:
      case SIGBUS:
      case SIGTRAP:
          if (info->si_code && (info->si_segvflags & __ISR_VALID))
              /* ISR.W (write-access) is bit 33:  */
              is_write = (info->si_isr >> 33) & 1;
          break;

      default:
          break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */