Remove some uses of phys_ram_base (initial patch by Ian Jackson)
[qemu/mini2440.git] / cpu-exec.c
blob 30caab15f26cdace6cedfd697ebab4aa26aee3c6
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
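/* exec.h maps the i386 guest registers onto macros; those names would
   collide with the register indices that the host signal headers
   (presumably glibc's <sys/ucontext.h>) define, so drop them before
   including the signal headers. */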
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
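/* No-ops on most hosts; the sparc/glibc workaround below redefines them
   to spill and reload the globals (env, T0, %i7) around libc calls. */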
#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents
static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
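
/* Slow-path TB lookup: blocks are hashed on the physical PC so that
   translations survive changes to the virtual mapping; on a miss a new
   block is translated here. */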
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
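    /* advance code_gen_ptr past the block just generated, rounded up to
       CODE_GEN_ALIGN so the next translation starts suitably aligned */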
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
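
/* Fast-path lookup via the per-CPU tb_jmp_cache, keyed on the virtual
   PC; falls back to tb_find_slow() on a miss. */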
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & U_FLAG;
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
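    /* note: the (pc, cs_base, flags) triple is what identifies a TB, so
       any CPU state that affects code generation has to be folded into
       flags above or stale translations could be reused */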
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */
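/* Structured as two nested loops: the outer for(;;) re-enters through
   setjmp() each time a guest exception longjmps back via
   cpu_loop_exit(); the inner for(;;) services pending interrupts, looks
   up the next TB and executes it. */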
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
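
            /* next_tb caches the last executed TB: tcg_qemu_tb_exec()
               returns the TB pointer with the index of the taken jump
               slot in its low two bits; tb_add_jump() below uses that
               pair to chain blocks, and zero forces a fresh lookup with
               no chaining */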
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && !(env->singlestep_enabled & SSTEP_NOIRQ)) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                           < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
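                /* next_tb & ~3 is the previously executed TB and
                   next_tb & 3 selects which of its two jump slots to
                   patch so that it branches straight to the new block */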
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    spin_unlock(&tb_lock);
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    next_tb = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
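
/* Without a soft MMU, guest memory accesses are raw host accesses, so a
   guest-side fault arrives as a host signal and has to be turned back
   into a guest exception by the per-target handlers below. */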

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
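
/* Host-side handlers: each variant extracts the faulting PC (and, where
   the host exposes one, a write flag) from the host-specific signal
   context and forwards them to handle_cpu_signal() above. */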

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif
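
/* On x86 hosts trap number 0xe is the page fault, and bit 1 of the page
   fault error code is set when the faulting access was a write. */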
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */