/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
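
/* Unwinds back to the setjmp() point in cpu_exec(). Guest state that may
   live in host registers must be flushed to 'env' first, since longjmp()
   will clobber those host registers. */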
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
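
/* TB lookup is two-level: tb_find_fast() below probes the virtual-PC
   indexed tb_jmp_cache first; on a miss it falls back to this slow path,
   which hashes the physical PC so that cached translations survive
   changes of the virtual mapping. */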
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
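
/* tb_find_fast() gathers the (pc, cs_base, flags) triple that uniquely
   identifies a translation for the current CPU state, then probes the
   virtual-PC hash table, deferring to tb_find_slow() on any mismatch. */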
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
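
/* Overall structure of cpu_exec(): the outer for(;;) is the setjmp()
   target where pending exceptions are delivered; the inner for(;;)
   checks interrupt_request, finds (or translates) the next TB, chains
   it to the previous one when possible, and runs it. cpu_loop_exit()
   longjmp()s back to the setjmp() point. */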
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
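    /* hostregs_helper.h is included three times in this function: with
       DECLARE_HOST_REGS it declares the save slots, with SAVE_HOST_REGS
       it fills them, and the third include at the end of the function
       restores the host's global registers. */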
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
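
            /* next_tb packs two values: the pointer to the TB whose code
               just returned and, in its low two bits, the jump slot that
               was taken (2 means the instruction counter expired). Zero
               means "no previous TB", which disables chaining below. */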
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
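                    /* this call jumps into the generated host code and,
                       via chained TBs, may run many blocks before it
                       returns */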
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
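                        /* icount_decr counts down as guest instructions
                           retire; icount_extra holds the budget that did
                           not fit into the 16-bit decrementer and is used
                           to refill it below */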
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
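
/* All the handle_cpu_signal() variants below follow the same contract:
   return 0 when the fault was not generated by guest code (the signal
   must be delivered to the application), 1 when it was handled here and
   execution can safely resume. */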
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
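
/* Host-side handlers: each cpu_signal_handler() below extracts the
   faulting host PC, the fault address and, where the host reports it,
   whether the access was a write from the host-specific signal context,
   then defers to handle_cpu_signal() above. */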
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)             ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)        ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)        ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)          ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */