/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
static unsigned long next_tb;
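/* next_tb carries the address of the TB that just finished executing, with
   the index of the jump slot it exited through packed into its two low bits
   (see the tb_add_jump() call in cpu_exec() below); 0 forces a fresh TB
   lookup and disables chaining for one iteration. */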
//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
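// (setjmp/longjmp in those glibc versions save and restore the host register
// state wholesale, clobbering the globals QEMU pins in fixed SPARC registers
// such as %i7; every call is therefore bracketed with the SAVE_GLOBALS /
// RESTORE_GLOBALS pairs defined below.)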
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
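
/* TB lookup is two-level: tb_find_fast() probes a direct-mapped cache
   indexed by virtual PC (env->tb_jmp_cache); on a miss, tb_find_slow() walks
   the hash chain keyed on the *physical* PC and, failing that, translates
   the block on the spot. */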
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
        | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
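    /* the (pc, cs_base, flags) triple must capture everything that affects
       how a block was translated; a cached TB is only reused when all three
       match. */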
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
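    /* Structure of what follows: an outer for(;;) re-arms a setjmp() that
       exception paths longjmp() back to; inside it, pending exceptions are
       delivered first, then an inner for(;;) services interrupt requests and
       executes one translated block per iteration. */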
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
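                    /* Every target above clears next_tb after delivering an
                       interrupt: control flow changed, so the previous TB
                       must not be chained to whatever runs next. */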
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
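                /* Once patched, the previous TB branches straight to
                   tb->tc_ptr in generated code; execution only returns to
                   this loop on an unchained exit, an exception, or a pending
                   interrupt request. */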
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
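
/* A caller drives cpu_exec() roughly like the sketch below (illustrative
 * only -- the real driver lives elsewhere in the tree, e.g. the main loop):
 *
 *     for (;;) {
 *         int r = cpu_exec(env);
 *         // dispatch on r: EXCP_INTERRUPT, EXCP_HLT, EXCP_DEBUG, ...
 *     }
 */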
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
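/* Return protocol shared by all the per-target variants below: 0 means the
   fault was not ours (let the host deal with it); 1 means it was handled
   (page unprotected or a guest fault raised) and guest execution resumes. */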
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
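
/* Host side: each cpu_signal_handler() below decodes the host-specific
   signal context into (pc, fault address, is_write) and forwards it to
   handle_cpu_signal() above. */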
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
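
/* Sketch of how a handler like this is typically installed (illustrative
 * only; the actual registration lives elsewhere in the tree, e.g. the OS
 * glue code, and host_signal_handler here is a hypothetical wrapper that
 * calls cpu_signal_handler()):
 *
 *     struct sigaction act;
 *     sigfillset(&act.sa_mask);
 *     act.sa_flags = SA_SIGINFO;
 *     act.sa_sigaction = host_signal_handler;
 *     sigaction(SIGSEGV, &act, NULL);
 */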
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)   /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */