/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;
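/* next_tb holds the value returned by tcg_qemu_tb_exec(): the address of
   the last executed TB with the index of the jump slot that was taken
   packed into its low two bits (see the next_tb & ~3 / next_tb & 3 split
   in cpu_exec() below); 0 forces a fresh TB lookup. */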
static unsigned long next_tb;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
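
/* tb_find_slow() looks a translated block up by the physical address of
   its guest code: the pc is translated to a physical address, hashed,
   and the matching tb_phys_hash chain is walked; if no block matches,
   a new one is generated. */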
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    spin_lock(&tb_lock);

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    spin_unlock(&tb_lock);
    return tb;
}
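
/* tb_find_fast() first probes tb_jmp_cache, which is indexed by the
   guest virtual pc, and falls back to tb_find_slow() on a miss or when
   pc, cs_base or flags do not match. */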
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
        /* Note: we do it here to avoid a gcc bug on Mac OS X when
           doing it in tb_find_slow */
        if (tb_invalidated_flag) {
            /* as some TB could have been invalidated because
               of memory exceptions while generating the code, we
               must recompute the hash index here */
            next_tb = 0;
        }
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
#if defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    uint32_t *saved_regwptr;
#endif
#endif
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    saved_regwptr = REGWPTR;
#endif
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
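
    /* execution comes back here via longjmp(env->jmp_env, 1) whenever
       cpu_loop_exit() or cpu_resume_from_signal() aborts translated code */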
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
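
            /* when built with kqemu support, let the accelerator run the
               guest natively first; it returns 1 on a guest exception and
               2 when softmmu execution is required */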
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
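
            /* inner loop: service pending interrupts, then find and run
               one translated block at a time */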
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0)
#if defined(TARGET_I386)
                    && env->hflags & HF_GIF_MASK
#endif
                    && likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                        !(env->hflags & HF_SMM_MASK)) {
                        svm_check_intercept(SVM_EXIT_SMI);
                        env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                        do_smm_enter();
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                               !(env->hflags & HF_NMI_MASK)) {
                        env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                        env->hflags |= HF_NMI_MASK;
                        do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                        next_tb = 0;
                    } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                               (env->eflags & IF_MASK || env->hflags & HF_HIF_MASK) &&
                               !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        svm_check_intercept(SVM_EXIT_INTR);
                        env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                        intno = cpu_get_pic_interrupt(env);
                        if (loglevel & CPU_LOG_TB_IN_ASM) {
                            fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                        }
                        do_interrupt(intno, 0, 0, 0, 1);
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                    } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                               (env->eflags & IF_MASK) && !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                        int intno;
                        /* FIXME: this should respect TPR */
                        env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                        svm_check_intercept(SVM_EXIT_VINTR);
                        intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                        if (loglevel & CPU_LOG_TB_IN_ASM)
                            fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                        do_interrupt(intno, 0, 0, -1, 1);
                        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
                                 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)) & ~V_IRQ_MASK);
                        next_tb = 0;
#endif
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    REGWPTR = env->regbase + (env->cwp * 16);
                    env->regwptr = REGWPTR;
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                tb = tb_find_fast();
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
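                /* next_tb packs the address of the previously executed TB
                   with the index of the jump slot that was taken in its low
                   two bits; tb_add_jump() below patches that slot to branch
                   directly to the new TB */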
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        spin_lock(&tb_lock);
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                        spin_unlock(&tb_lock);
                    }
                }
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#if defined(reg_REGWPTR)
    REGWPTR = saved_regwptr;
#endif
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
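    /* in real mode or vm86 mode the segment base is simply selector << 4;
       otherwise a full protected-mode segment load is performed */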
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
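
/* Each target below defines handle_cpu_signal(); it returns 1 when the
   fault was resolved (write-protected page unprotected, soft MMU fault
   serviced, or a guest exception raised) and 0 when the signal was not
   caused by the emulated CPU and must be handled by the host. */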

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
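    /* trap 0xe is the x86 page fault; bit 1 of its error code is set
       when the faulting access was a write */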
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
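    /* TRAP 0x400 is the instruction storage interrupt, which carries no
       data access; DSISR bit 0x02000000 flags a store access */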
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
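    /* the major opcode in bits 31..26 identifies the instruction; the
       cases below are the Alpha store instructions, i.e. writes */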
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
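    /* op (bits 31..30) == 3 selects the SPARC load/store instruction
       format; the op3 values below are the store variants */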
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.arm_pc;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */