First try at supporting ordinary signals for CRIS linux-user guests.
[qemu/qemu-JZ.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

#define SAVE_GLOBALS()
#define RESTORE_GLOBALS()
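
/* SAVE_GLOBALS/RESTORE_GLOBALS are deliberately empty by default: on most
   hosts the globals QEMU keeps in fixed host registers (env, T0, ...)
   survive setjmp/longjmp unscathed.  The sparc/glibc block below redefines
   them to spill and reload those registers by hand. */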

#if defined(__sparc__) && !defined(HOST_SOLARIS)
#include <features.h>
#if defined(__GLIBC__) && ((__GLIBC__ < 2) || \
                           ((__GLIBC__ == 2) && (__GLIBC_MINOR__ <= 90)))
// Work around ugly bugs in glibc that mangle global register contents

static volatile void *saved_env;
static volatile unsigned long saved_t0, saved_i7;
#undef SAVE_GLOBALS
#define SAVE_GLOBALS() do {                                     \
        saved_env = env;                                        \
        saved_t0 = T0;                                          \
        asm volatile ("st %%i7, [%0]" : : "r" (&saved_i7));     \
    } while(0)

#undef RESTORE_GLOBALS
#define RESTORE_GLOBALS() do {                                  \
        env = (void *)saved_env;                                \
        T0 = saved_t0;                                          \
        asm volatile ("ld [%0], %%i7" : : "r" (&saved_i7));     \
    } while(0)

static int sparc_setjmp(jmp_buf buf)
{
    int ret;

    SAVE_GLOBALS();
    ret = setjmp(buf);
    RESTORE_GLOBALS();
    return ret;
}
#undef setjmp
#define setjmp(jmp_buf) sparc_setjmp(jmp_buf)

static void sparc_longjmp(jmp_buf buf, int val)
{
    SAVE_GLOBALS();
    longjmp(buf, val);
}
#define longjmp(jmp_buf, val) sparc_longjmp(jmp_buf, val)
#endif
#endif

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp() will restore them to their setjmp-time values */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
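
/* A brief note on the mechanism: the handle_cpu_signal() helpers near the
   end of this file call cpu_resume_from_signal() once a guest memory fault
   has been fixed up; the longjmp() above lands back in the setjmp() inside
   cpu_exec(), which restarts translated-code execution cleanly. */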

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    SAVE_GLOBALS();
    cpu_gen_code(env, tb, &code_gen_size);
    RESTORE_GLOBALS();
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
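
/* TB lookup is two-level: tb_find_fast() below first consults the per-CPU
   virtual-PC hash (env->tb_jmp_cache); only on a miss does tb_find_slow()
   above walk the physical-address hash chain, translating a fresh block if
   nothing matches. */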

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    flags |= env->intercept;
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6   */
            | (env->sr & SR_S)            /* Bit  13  */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = 0;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            T0 = 0;
        }
    }
    return tb;
}

#define BREAK_CHAIN T0 = 0
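
/* Translated blocks may be patched to jump directly to their successor
   ("TB chaining"), bypassing the dispatch loop.  T0 carries the identity
   of the block that just exited so the chain can be created; zeroing it
   via BREAK_CHAIN forbids patching whenever the program flow was changed
   by an interrupt or exception. */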

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    long (*gen_func)(void);
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
    SAVE_GLOBALS();

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env->exception_index);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
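
            /* As handled above, kqemu_cpu_exec() returns 1 when a guest
               exception is pending, 2 when it wants us to fall back to the
               software MMU path, and anything else either services a
               pending interrupt or simply restarts the loop. */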

            T0 = 0; /* force lookup of first TB */
            for(;;) {
                SAVE_GLOBALS();
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                        && env->hflags & HF_GIF_MASK
#endif
                        ) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        BREAK_CHAIN;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                        !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                        (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        BREAK_CHAIN;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            do_interrupt(env->interrupt_index);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            BREAK_CHAIN;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        BREAK_CHAIN;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        BREAK_CHAIN;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        BREAK_CHAIN;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                RESTORE_GLOBALS();
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (T0 != 0 &&
#if USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    spin_lock(&tb_lock);
                    tb_add_jump((TranslationBlock *)(long)(T0 & ~3), T0 & 3, tb);
                    spin_unlock(&tb_lock);
                }
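
                /* T0 encodes the block that just returned: the pointer to
                   the calling TranslationBlock with the index of the taken
                   jump slot in its two low bits, which tb_add_jump() uses
                   to patch that slot to branch straight to the new TB. */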
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
                gen_func = (void *)tc_ptr;
#if defined(__sparc__)
                __asm__ __volatile__("call %0\n\t"
                                     "mov %%o7,%%i0"
                                     : /* no outputs */
                                     : "r" (gen_func)
                                     : "i0", "i1", "i2", "i3", "i4", "i5",
                                       "o0", "o1", "o2", "o3", "o4", "o5",
                                       "l0", "l1", "l2", "l3", "l4", "l5",
                                       "l6", "l7");
#elif defined(__arm__)
                asm volatile ("mov pc, %0\n\t"
                              ".global exec_loop\n\t"
                              "exec_loop:\n\t"
                              : /* no outputs */
                              : "r" (gen_func)
                              : "r1", "r2", "r3", "r8", "r9", "r10", "r12", "r14");
#elif defined(__ia64)
                struct fptr {
                    void *ip;
                    void *gp;
                } fp;

                fp.ip = tc_ptr;
                fp.gp = code_gen_buffer + 2 * (1 << 20);
                (*(void (*)(void)) &fp)();
#else
                T0 = gen_func();
#endif
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
                if (env->hflags & HF_SOFTMMU_MASK) {
                    env->hflags &= ~HF_SOFTMMU_MASK;
                    /* do not allow linking to another block */
                    T0 = 0;
                }
#endif
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
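
    /* Exception control flow recap: cpu_loop_exit() and the fault handlers
       longjmp() back to the setjmp() at the top of the outer loop; the
       else-branch above then re-syncs env from the host registers before
       the pending exception is dispatched on the next iteration. */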

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    RESTORE_GLOBALS();
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
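
#if 0
/* Illustrative sketch only (not part of this file): a linux-user style
   caller drives cpu_exec() in a loop and acts on the exception code it
   returns.  The function name and the case handling here are assumptions
   made up for illustration. */
static void example_cpu_loop(CPUState *env)
{
    for (;;) {
        int trapnr = cpu_exec(env);
        switch (trapnr) {
        case EXCP_INTERRUPT:
            /* exit request from the execution loop: nothing to do,
               just iterate again */
            break;
        default:
            /* target specific: deliver a guest signal, service a
               syscall, etc. */
            break;
        }
    }
}
#endif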

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
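
/* The three helpers above all follow the same pattern: the global 'env'
   (usually kept in a fixed host register) is temporarily repointed at the
   caller-supplied CPUX86State so that helper_* code which implicitly uses
   'env' acts on the right CPU, and is restored before returning. */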

#if !defined(CONFIG_SOFTMMU)
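
/* Each target below provides a handle_cpu_signal() with the same contract:
   return 0 if the fault was not generated by a guest memory access (the
   caller should then treat it as a real host fault), return 1 if it was
   handled transparently, or never return because execution resumes via
   cpu_resume_from_signal()/cpu_loop_exit(). */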

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
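
#if 0
/* Illustrative sketch only (not part of this file): a host signal handler
   of this shape is typically installed with SA_SIGINFO so that both the
   siginfo_t and the ucontext reach cpu_signal_handler().  The handler and
   setup names below are assumptions made up for illustration; the real
   installation lives elsewhere (e.g. the linux-user startup code). */
static void host_segv_handler(int sig, siginfo_t *info, void *puc)
{
    if (!cpu_signal_handler(sig, info, puc))
        abort(); /* not a guest fault: a real bug in qemu itself */
}

static void install_handler_sketch(void)
{
    struct sigaction act;

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_segv_handler;
    sigaction(SIGSEGV, &act, NULL);
}
#endif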

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    unsigned long pc;
    int is_write;
    uint32_t insn;

    /* XXX: is there a standard glibc define ? */
    pc = regs[1];
    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[R15];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */