Fix interrupt exclusion via SSTEP_NOIRQ
[qemu/mini2440.git] / cpu-exec.c
blobc6db5adf5e6eb523fe38a5878ca813e0fe0afe6c
1 /*
2 * i386 emulator main execution loop
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "config.h"
21 #define CPU_NO_GLOBAL_REGS
22 #include "exec.h"
23 #include "disas.h"
24 #include "tcg.h"
25 #include "kvm.h"
27 #if !defined(CONFIG_SOFTMMU)
28 #undef EAX
29 #undef ECX
30 #undef EDX
31 #undef EBX
32 #undef ESP
33 #undef EBP
34 #undef ESI
35 #undef EDI
36 #undef EIP
37 #include <signal.h>
38 #ifdef __linux__
39 #include <sys/ucontext.h>
40 #endif
41 #endif
43 #if defined(__sparc__) && !defined(HOST_SOLARIS)
44 // Work around ugly bugs in glibc that mangle global register contents
45 #undef env
46 #define env cpu_single_env
47 #endif
/* Set when a TranslationBlock may have been invalidated while code was
   being generated; tb_find_fast()/cpu_exec() check it to force a fresh
   TB lookup instead of chaining to a possibly stale block. */
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
54 void cpu_loop_exit(void)
56 /* NOTE: the register at this point must be saved by hand because
57 longjmp restore them */
58 regs_to_env();
59 longjmp(env->jmp_env, 1);
62 /* exit the current TB from a signal handler. The host registers are
63 restored in a state compatible with the CPU emulator
65 void cpu_resume_from_signal(CPUState *env1, void *puc)
67 #if !defined(CONFIG_SOFTMMU)
68 #ifdef __linux__
69 struct ucontext *uc = puc;
70 #elif defined(__OpenBSD__)
71 struct sigcontext *uc = puc;
72 #endif
73 #endif
75 env = env1;
77 /* XXX: restore cpu registers saved in host registers */
79 #if !defined(CONFIG_SOFTMMU)
80 if (puc) {
81 /* XXX: use siglongjmp ? */
82 #ifdef __linux__
83 sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
84 #elif defined(__OpenBSD__)
85 sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
86 #endif
88 #endif
89 longjmp(env->jmp_env, 1);
92 /* Execute the code without caching the generated code. An interpreter
93 could be used if available. */
94 static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
96 unsigned long next_tb;
97 TranslationBlock *tb;
99 /* Should never happen.
100 We only end up here when an existing TB is too long. */
101 if (max_cycles > CF_COUNT_MASK)
102 max_cycles = CF_COUNT_MASK;
104 tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
105 max_cycles);
106 env->current_tb = tb;
107 /* execute the generated code */
108 next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
110 if ((next_tb & 3) == 2) {
111 /* Restore PC. This may happen if async event occurs before
112 the TB starts executing. */
113 CPU_PC_FROM_TB(env, tb);
115 tb_phys_invalidate(tb, -1);
116 tb_free(tb);
119 static TranslationBlock *tb_find_slow(target_ulong pc,
120 target_ulong cs_base,
121 uint64_t flags)
123 TranslationBlock *tb, **ptb1;
124 unsigned int h;
125 target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
127 tb_invalidated_flag = 0;
129 regs_to_env(); /* XXX: do it just before cpu_gen_code() */
131 /* find translated block using physical mappings */
132 phys_pc = get_phys_addr_code(env, pc);
133 phys_page1 = phys_pc & TARGET_PAGE_MASK;
134 phys_page2 = -1;
135 h = tb_phys_hash_func(phys_pc);
136 ptb1 = &tb_phys_hash[h];
137 for(;;) {
138 tb = *ptb1;
139 if (!tb)
140 goto not_found;
141 if (tb->pc == pc &&
142 tb->page_addr[0] == phys_page1 &&
143 tb->cs_base == cs_base &&
144 tb->flags == flags) {
145 /* check next page if needed */
146 if (tb->page_addr[1] != -1) {
147 virt_page2 = (pc & TARGET_PAGE_MASK) +
148 TARGET_PAGE_SIZE;
149 phys_page2 = get_phys_addr_code(env, virt_page2);
150 if (tb->page_addr[1] == phys_page2)
151 goto found;
152 } else {
153 goto found;
156 ptb1 = &tb->phys_hash_next;
158 not_found:
159 /* if no translated code available, then translate it now */
160 tb = tb_gen_code(env, pc, cs_base, flags, 0);
162 found:
163 /* we add the TB in the virtual pc hash table */
164 env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
165 return tb;
168 static inline TranslationBlock *tb_find_fast(void)
170 TranslationBlock *tb;
171 target_ulong cs_base, pc;
172 uint64_t flags;
174 /* we record a subset of the CPU state. It will
175 always be the same before a given translated block
176 is executed. */
177 #if defined(TARGET_I386)
178 flags = env->hflags;
179 flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
180 cs_base = env->segs[R_CS].base;
181 pc = cs_base + env->eip;
182 #elif defined(TARGET_ARM)
183 flags = env->thumb | (env->vfp.vec_len << 1)
184 | (env->vfp.vec_stride << 4);
185 if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
186 flags |= (1 << 6);
187 if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
188 flags |= (1 << 7);
189 flags |= (env->condexec_bits << 8);
190 cs_base = 0;
191 pc = env->regs[15];
192 #elif defined(TARGET_SPARC)
193 #ifdef TARGET_SPARC64
194 // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
195 flags = ((env->pstate & PS_AM) << 2)
196 | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
197 | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
198 #else
199 // FPU enable . Supervisor
200 flags = (env->psref << 4) | env->psrs;
201 #endif
202 cs_base = env->npc;
203 pc = env->pc;
204 #elif defined(TARGET_PPC)
205 flags = env->hflags;
206 cs_base = 0;
207 pc = env->nip;
208 #elif defined(TARGET_MIPS)
209 flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
210 cs_base = 0;
211 pc = env->active_tc.PC;
212 #elif defined(TARGET_M68K)
213 flags = (env->fpcr & M68K_FPCR_PREC) /* Bit 6 */
214 | (env->sr & SR_S) /* Bit 13 */
215 | ((env->macsr >> 4) & 0xf); /* Bits 0-3 */
216 cs_base = 0;
217 pc = env->pc;
218 #elif defined(TARGET_SH4)
219 flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
220 | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME)) /* Bits 0- 3 */
221 | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR)) /* Bits 19-21 */
222 | (env->sr & (SR_MD | SR_RB)); /* Bits 29-30 */
223 cs_base = 0;
224 pc = env->pc;
225 #elif defined(TARGET_ALPHA)
226 flags = env->ps;
227 cs_base = 0;
228 pc = env->pc;
229 #elif defined(TARGET_CRIS)
230 flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
231 flags |= env->dslot;
232 cs_base = 0;
233 pc = env->pc;
234 #else
235 #error unsupported CPU
236 #endif
237 tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
238 if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
239 tb->flags != flags)) {
240 tb = tb_find_slow(pc, cs_base, flags);
242 return tb;
245 /* main execution loop */
247 int cpu_exec(CPUState *env1)
249 #define DECLARE_HOST_REGS 1
250 #include "hostregs_helper.h"
251 int ret, interrupt_request;
252 TranslationBlock *tb;
253 uint8_t *tc_ptr;
254 unsigned long next_tb;
256 if (cpu_halted(env1) == EXCP_HALTED)
257 return EXCP_HALTED;
259 cpu_single_env = env1;
261 /* first we save global registers */
262 #define SAVE_HOST_REGS 1
263 #include "hostregs_helper.h"
264 env = env1;
266 env_to_regs();
267 #if defined(TARGET_I386)
268 /* put eflags in CPU temporary format */
269 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
270 DF = 1 - (2 * ((env->eflags >> 10) & 1));
271 CC_OP = CC_OP_EFLAGS;
272 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
273 #elif defined(TARGET_SPARC)
274 #elif defined(TARGET_M68K)
275 env->cc_op = CC_OP_FLAGS;
276 env->cc_dest = env->sr & 0xf;
277 env->cc_x = (env->sr >> 4) & 1;
278 #elif defined(TARGET_ALPHA)
279 #elif defined(TARGET_ARM)
280 #elif defined(TARGET_PPC)
281 #elif defined(TARGET_MIPS)
282 #elif defined(TARGET_SH4)
283 #elif defined(TARGET_CRIS)
284 /* XXXXX */
285 #else
286 #error unsupported target CPU
287 #endif
288 env->exception_index = -1;
290 /* prepare setjmp context for exception handling */
291 for(;;) {
292 if (setjmp(env->jmp_env) == 0) {
293 env->current_tb = NULL;
294 /* if an exception is pending, we execute it here */
295 if (env->exception_index >= 0) {
296 if (env->exception_index >= EXCP_INTERRUPT) {
297 /* exit request from the cpu execution loop */
298 ret = env->exception_index;
299 break;
300 } else if (env->user_mode_only) {
301 /* if user mode only, we simulate a fake exception
302 which will be handled outside the cpu execution
303 loop */
304 #if defined(TARGET_I386)
305 do_interrupt_user(env->exception_index,
306 env->exception_is_int,
307 env->error_code,
308 env->exception_next_eip);
309 /* successfully delivered */
310 env->old_exception = -1;
311 #endif
312 ret = env->exception_index;
313 break;
314 } else {
315 #if defined(TARGET_I386)
316 /* simulate a real cpu exception. On i386, it can
317 trigger new exceptions, but we do not handle
318 double or triple faults yet. */
319 do_interrupt(env->exception_index,
320 env->exception_is_int,
321 env->error_code,
322 env->exception_next_eip, 0);
323 /* successfully delivered */
324 env->old_exception = -1;
325 #elif defined(TARGET_PPC)
326 do_interrupt(env);
327 #elif defined(TARGET_MIPS)
328 do_interrupt(env);
329 #elif defined(TARGET_SPARC)
330 do_interrupt(env);
331 #elif defined(TARGET_ARM)
332 do_interrupt(env);
333 #elif defined(TARGET_SH4)
334 do_interrupt(env);
335 #elif defined(TARGET_ALPHA)
336 do_interrupt(env);
337 #elif defined(TARGET_CRIS)
338 do_interrupt(env);
339 #elif defined(TARGET_M68K)
340 do_interrupt(0);
341 #endif
343 env->exception_index = -1;
345 #ifdef USE_KQEMU
346 if (kqemu_is_ok(env) && env->interrupt_request == 0) {
347 int ret;
348 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
349 ret = kqemu_cpu_exec(env);
350 /* put eflags in CPU temporary format */
351 CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
352 DF = 1 - (2 * ((env->eflags >> 10) & 1));
353 CC_OP = CC_OP_EFLAGS;
354 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
355 if (ret == 1) {
356 /* exception */
357 longjmp(env->jmp_env, 1);
358 } else if (ret == 2) {
359 /* softmmu execution needed */
360 } else {
361 if (env->interrupt_request != 0) {
362 /* hardware interrupt will be executed just after */
363 } else {
364 /* otherwise, we restart */
365 longjmp(env->jmp_env, 1);
369 #endif
371 if (kvm_enabled()) {
372 int ret;
373 ret = kvm_cpu_exec(env);
374 if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
375 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
376 env->exception_index = EXCP_INTERRUPT;
377 cpu_loop_exit();
378 } else if (env->halted) {
379 cpu_loop_exit();
380 } else
381 longjmp(env->jmp_env, 1);
384 next_tb = 0; /* force lookup of first TB */
385 for(;;) {
386 interrupt_request = env->interrupt_request;
387 if (unlikely(interrupt_request)) {
388 if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
389 /* Mask out external interrupts for this step. */
390 interrupt_request &= ~(CPU_INTERRUPT_HARD |
391 CPU_INTERRUPT_FIQ |
392 CPU_INTERRUPT_SMI |
393 CPU_INTERRUPT_NMI);
395 if (interrupt_request & CPU_INTERRUPT_DEBUG) {
396 env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
397 env->exception_index = EXCP_DEBUG;
398 cpu_loop_exit();
400 #if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
401 defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
402 if (interrupt_request & CPU_INTERRUPT_HALT) {
403 env->interrupt_request &= ~CPU_INTERRUPT_HALT;
404 env->halted = 1;
405 env->exception_index = EXCP_HLT;
406 cpu_loop_exit();
408 #endif
409 #if defined(TARGET_I386)
410 if (env->hflags2 & HF2_GIF_MASK) {
411 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
412 !(env->hflags & HF_SMM_MASK)) {
413 svm_check_intercept(SVM_EXIT_SMI);
414 env->interrupt_request &= ~CPU_INTERRUPT_SMI;
415 do_smm_enter();
416 next_tb = 0;
417 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
418 !(env->hflags2 & HF2_NMI_MASK)) {
419 env->interrupt_request &= ~CPU_INTERRUPT_NMI;
420 env->hflags2 |= HF2_NMI_MASK;
421 do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
422 next_tb = 0;
423 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
424 (((env->hflags2 & HF2_VINTR_MASK) &&
425 (env->hflags2 & HF2_HIF_MASK)) ||
426 (!(env->hflags2 & HF2_VINTR_MASK) &&
427 (env->eflags & IF_MASK &&
428 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
429 int intno;
430 svm_check_intercept(SVM_EXIT_INTR);
431 env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
432 intno = cpu_get_pic_interrupt(env);
433 if (loglevel & CPU_LOG_TB_IN_ASM) {
434 fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
436 do_interrupt(intno, 0, 0, 0, 1);
437 /* ensure that no TB jump will be modified as
438 the program flow was changed */
439 next_tb = 0;
440 #if !defined(CONFIG_USER_ONLY)
441 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
442 (env->eflags & IF_MASK) &&
443 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
444 int intno;
445 /* FIXME: this should respect TPR */
446 svm_check_intercept(SVM_EXIT_VINTR);
447 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
448 intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
449 if (loglevel & CPU_LOG_TB_IN_ASM)
450 fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
451 do_interrupt(intno, 0, 0, 0, 1);
452 next_tb = 0;
453 #endif
456 #elif defined(TARGET_PPC)
457 #if 0
458 if ((interrupt_request & CPU_INTERRUPT_RESET)) {
459 cpu_ppc_reset(env);
461 #endif
462 if (interrupt_request & CPU_INTERRUPT_HARD) {
463 ppc_hw_interrupt(env);
464 if (env->pending_interrupts == 0)
465 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
466 next_tb = 0;
468 #elif defined(TARGET_MIPS)
469 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
470 (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
471 (env->CP0_Status & (1 << CP0St_IE)) &&
472 !(env->CP0_Status & (1 << CP0St_EXL)) &&
473 !(env->CP0_Status & (1 << CP0St_ERL)) &&
474 !(env->hflags & MIPS_HFLAG_DM)) {
475 /* Raise it */
476 env->exception_index = EXCP_EXT_INTERRUPT;
477 env->error_code = 0;
478 do_interrupt(env);
479 next_tb = 0;
481 #elif defined(TARGET_SPARC)
482 if ((interrupt_request & CPU_INTERRUPT_HARD) &&
483 (env->psret != 0)) {
484 int pil = env->interrupt_index & 15;
485 int type = env->interrupt_index & 0xf0;
487 if (((type == TT_EXTINT) &&
488 (pil == 15 || pil > env->psrpil)) ||
489 type != TT_EXTINT) {
490 env->interrupt_request &= ~CPU_INTERRUPT_HARD;
491 env->exception_index = env->interrupt_index;
492 do_interrupt(env);
493 env->interrupt_index = 0;
494 #if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
495 cpu_check_irqs(env);
496 #endif
497 next_tb = 0;
499 } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
500 //do_interrupt(0, 0, 0, 0, 0);
501 env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
503 #elif defined(TARGET_ARM)
504 if (interrupt_request & CPU_INTERRUPT_FIQ
505 && !(env->uncached_cpsr & CPSR_F)) {
506 env->exception_index = EXCP_FIQ;
507 do_interrupt(env);
508 next_tb = 0;
510 /* ARMv7-M interrupt return works by loading a magic value
511 into the PC. On real hardware the load causes the
512 return to occur. The qemu implementation performs the
513 jump normally, then does the exception return when the
514 CPU tries to execute code at the magic address.
515 This will cause the magic PC value to be pushed to
516 the stack if an interrupt occured at the wrong time.
517 We avoid this by disabling interrupts when
518 pc contains a magic address. */
519 if (interrupt_request & CPU_INTERRUPT_HARD
520 && ((IS_M(env) && env->regs[15] < 0xfffffff0)
521 || !(env->uncached_cpsr & CPSR_I))) {
522 env->exception_index = EXCP_IRQ;
523 do_interrupt(env);
524 next_tb = 0;
526 #elif defined(TARGET_SH4)
527 if (interrupt_request & CPU_INTERRUPT_HARD) {
528 do_interrupt(env);
529 next_tb = 0;
531 #elif defined(TARGET_ALPHA)
532 if (interrupt_request & CPU_INTERRUPT_HARD) {
533 do_interrupt(env);
534 next_tb = 0;
536 #elif defined(TARGET_CRIS)
537 if (interrupt_request & CPU_INTERRUPT_HARD
538 && (env->pregs[PR_CCS] & I_FLAG)) {
539 env->exception_index = EXCP_IRQ;
540 do_interrupt(env);
541 next_tb = 0;
543 if (interrupt_request & CPU_INTERRUPT_NMI
544 && (env->pregs[PR_CCS] & M_FLAG)) {
545 env->exception_index = EXCP_NMI;
546 do_interrupt(env);
547 next_tb = 0;
549 #elif defined(TARGET_M68K)
550 if (interrupt_request & CPU_INTERRUPT_HARD
551 && ((env->sr & SR_I) >> SR_I_SHIFT)
552 < env->pending_level) {
553 /* Real hardware gets the interrupt vector via an
554 IACK cycle at this point. Current emulated
555 hardware doesn't rely on this, so we
556 provide/save the vector when the interrupt is
557 first signalled. */
558 env->exception_index = env->pending_vector;
559 do_interrupt(1);
560 next_tb = 0;
562 #endif
563 /* Don't use the cached interupt_request value,
564 do_interrupt may have updated the EXITTB flag. */
565 if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
566 env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
567 /* ensure that no TB jump will be modified as
568 the program flow was changed */
569 next_tb = 0;
571 if (interrupt_request & CPU_INTERRUPT_EXIT) {
572 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
573 env->exception_index = EXCP_INTERRUPT;
574 cpu_loop_exit();
577 #ifdef DEBUG_EXEC
578 if ((loglevel & CPU_LOG_TB_CPU)) {
579 /* restore flags in standard format */
580 regs_to_env();
581 #if defined(TARGET_I386)
582 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
583 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
584 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
585 #elif defined(TARGET_ARM)
586 cpu_dump_state(env, logfile, fprintf, 0);
587 #elif defined(TARGET_SPARC)
588 cpu_dump_state(env, logfile, fprintf, 0);
589 #elif defined(TARGET_PPC)
590 cpu_dump_state(env, logfile, fprintf, 0);
591 #elif defined(TARGET_M68K)
592 cpu_m68k_flush_flags(env, env->cc_op);
593 env->cc_op = CC_OP_FLAGS;
594 env->sr = (env->sr & 0xffe0)
595 | env->cc_dest | (env->cc_x << 4);
596 cpu_dump_state(env, logfile, fprintf, 0);
597 #elif defined(TARGET_MIPS)
598 cpu_dump_state(env, logfile, fprintf, 0);
599 #elif defined(TARGET_SH4)
600 cpu_dump_state(env, logfile, fprintf, 0);
601 #elif defined(TARGET_ALPHA)
602 cpu_dump_state(env, logfile, fprintf, 0);
603 #elif defined(TARGET_CRIS)
604 cpu_dump_state(env, logfile, fprintf, 0);
605 #else
606 #error unsupported target CPU
607 #endif
609 #endif
610 spin_lock(&tb_lock);
611 tb = tb_find_fast();
612 /* Note: we do it here to avoid a gcc bug on Mac OS X when
613 doing it in tb_find_slow */
614 if (tb_invalidated_flag) {
615 /* as some TB could have been invalidated because
616 of memory exceptions while generating the code, we
617 must recompute the hash index here */
618 next_tb = 0;
619 tb_invalidated_flag = 0;
621 #ifdef DEBUG_EXEC
622 if ((loglevel & CPU_LOG_EXEC)) {
623 fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
624 (long)tb->tc_ptr, tb->pc,
625 lookup_symbol(tb->pc));
627 #endif
628 /* see if we can patch the calling TB. When the TB
629 spans two pages, we cannot safely do a direct
630 jump. */
632 if (next_tb != 0 &&
633 #ifdef USE_KQEMU
634 (env->kqemu_enabled != 2) &&
635 #endif
636 tb->page_addr[1] == -1) {
637 tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
640 spin_unlock(&tb_lock);
641 env->current_tb = tb;
643 /* cpu_interrupt might be called while translating the
644 TB, but before it is linked into a potentially
645 infinite loop and becomes env->current_tb. Avoid
646 starting execution if there is a pending interrupt. */
647 if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
648 env->current_tb = NULL;
650 while (env->current_tb) {
651 tc_ptr = tb->tc_ptr;
652 /* execute the generated code */
653 #if defined(__sparc__) && !defined(HOST_SOLARIS)
654 #undef env
655 env = cpu_single_env;
656 #define env cpu_single_env
657 #endif
658 next_tb = tcg_qemu_tb_exec(tc_ptr);
659 env->current_tb = NULL;
660 if ((next_tb & 3) == 2) {
661 /* Instruction counter expired. */
662 int insns_left;
663 tb = (TranslationBlock *)(long)(next_tb & ~3);
664 /* Restore PC. */
665 CPU_PC_FROM_TB(env, tb);
666 insns_left = env->icount_decr.u32;
667 if (env->icount_extra && insns_left >= 0) {
668 /* Refill decrementer and continue execution. */
669 env->icount_extra += insns_left;
670 if (env->icount_extra > 0xffff) {
671 insns_left = 0xffff;
672 } else {
673 insns_left = env->icount_extra;
675 env->icount_extra -= insns_left;
676 env->icount_decr.u16.low = insns_left;
677 } else {
678 if (insns_left > 0) {
679 /* Execute remaining instructions. */
680 cpu_exec_nocache(insns_left, tb);
682 env->exception_index = EXCP_INTERRUPT;
683 next_tb = 0;
684 cpu_loop_exit();
688 /* reset soft MMU for next block (it can currently
689 only be set by a memory fault) */
690 #if defined(USE_KQEMU)
691 #define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
692 if (kqemu_is_ok(env) &&
693 (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
694 cpu_loop_exit();
696 #endif
697 } /* for(;;) */
698 } else {
699 env_to_regs();
701 } /* for(;;) */
704 #if defined(TARGET_I386)
705 /* restore flags in standard format */
706 env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
707 #elif defined(TARGET_ARM)
708 /* XXX: Save/restore host fpu exception state?. */
709 #elif defined(TARGET_SPARC)
710 #elif defined(TARGET_PPC)
711 #elif defined(TARGET_M68K)
712 cpu_m68k_flush_flags(env, env->cc_op);
713 env->cc_op = CC_OP_FLAGS;
714 env->sr = (env->sr & 0xffe0)
715 | env->cc_dest | (env->cc_x << 4);
716 #elif defined(TARGET_MIPS)
717 #elif defined(TARGET_SH4)
718 #elif defined(TARGET_ALPHA)
719 #elif defined(TARGET_CRIS)
720 /* XXXXX */
721 #else
722 #error unsupported target CPU
723 #endif
725 /* restore global registers */
726 #include "hostregs_helper.h"
728 /* fail safe : never use cpu_single_env outside cpu_exec() */
729 cpu_single_env = NULL;
730 return ret;
733 /* must only be called from the generated code as an exception can be
734 generated */
735 void tb_invalidate_page_range(target_ulong start, target_ulong end)
737 /* XXX: cannot enable it yet because it yields to MMU exception
738 where NIP != read address on PowerPC */
739 #if 0
740 target_ulong phys_addr;
741 phys_addr = get_phys_addr_code(env, start);
742 tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
743 #endif
746 #if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)
748 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
750 CPUX86State *saved_env;
752 saved_env = env;
753 env = s;
754 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
755 selector &= 0xffff;
756 cpu_x86_load_seg_cache(env, seg_reg, selector,
757 (selector << 4), 0xffff, 0);
758 } else {
759 helper_load_seg(seg_reg, selector);
761 env = saved_env;
764 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
766 CPUX86State *saved_env;
768 saved_env = env;
769 env = s;
771 helper_fsave(ptr, data32);
773 env = saved_env;
776 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
778 CPUX86State *saved_env;
780 saved_env = env;
781 env = s;
783 helper_frstor(ptr, data32);
785 env = saved_env;
788 #endif /* TARGET_I386 */
790 #if !defined(CONFIG_SOFTMMU)
792 #if defined(TARGET_I386)
794 /* 'pc' is the host PC at which the exception was raised. 'address' is
795 the effective address of the memory exception. 'is_write' is 1 if a
796 write caused the exception and otherwise 0'. 'old_set' is the
797 signal set which should be restored */
798 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
799 int is_write, sigset_t *old_set,
800 void *puc)
802 TranslationBlock *tb;
803 int ret;
805 if (cpu_single_env)
806 env = cpu_single_env; /* XXX: find a correct solution for multithread */
807 #if defined(DEBUG_SIGNAL)
808 qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
809 pc, address, is_write, *(unsigned long *)old_set);
810 #endif
811 /* XXX: locking issue */
812 if (is_write && page_unprotect(h2g(address), pc, puc)) {
813 return 1;
816 /* see if it is an MMU fault */
817 ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
818 if (ret < 0)
819 return 0; /* not an MMU fault */
820 if (ret == 0)
821 return 1; /* the MMU fault was handled without causing real CPU fault */
822 /* now we have a real cpu fault */
823 tb = tb_find_pc(pc);
824 if (tb) {
825 /* the PC is inside the translated code. It means that we have
826 a virtual CPU fault */
827 cpu_restore_state(tb, env, pc, puc);
829 if (ret == 1) {
830 #if 0
831 printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
832 env->eip, env->cr[2], env->error_code);
833 #endif
834 /* we restore the process signal mask as the sigreturn should
835 do it (XXX: use sigsetjmp) */
836 sigprocmask(SIG_SETMASK, old_set, NULL);
837 raise_exception_err(env->exception_index, env->error_code);
838 } else {
839 /* activate soft MMU for this block */
840 env->hflags |= HF_SOFTMMU_MASK;
841 cpu_resume_from_signal(env, puc);
843 /* never comes here */
844 return 1;
847 #elif defined(TARGET_ARM)
848 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
849 int is_write, sigset_t *old_set,
850 void *puc)
852 TranslationBlock *tb;
853 int ret;
855 if (cpu_single_env)
856 env = cpu_single_env; /* XXX: find a correct solution for multithread */
857 #if defined(DEBUG_SIGNAL)
858 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
859 pc, address, is_write, *(unsigned long *)old_set);
860 #endif
861 /* XXX: locking issue */
862 if (is_write && page_unprotect(h2g(address), pc, puc)) {
863 return 1;
865 /* see if it is an MMU fault */
866 ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
867 if (ret < 0)
868 return 0; /* not an MMU fault */
869 if (ret == 0)
870 return 1; /* the MMU fault was handled without causing real CPU fault */
871 /* now we have a real cpu fault */
872 tb = tb_find_pc(pc);
873 if (tb) {
874 /* the PC is inside the translated code. It means that we have
875 a virtual CPU fault */
876 cpu_restore_state(tb, env, pc, puc);
878 /* we restore the process signal mask as the sigreturn should
879 do it (XXX: use sigsetjmp) */
880 sigprocmask(SIG_SETMASK, old_set, NULL);
881 cpu_loop_exit();
882 /* never comes here */
883 return 1;
885 #elif defined(TARGET_SPARC)
886 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
887 int is_write, sigset_t *old_set,
888 void *puc)
890 TranslationBlock *tb;
891 int ret;
893 if (cpu_single_env)
894 env = cpu_single_env; /* XXX: find a correct solution for multithread */
895 #if defined(DEBUG_SIGNAL)
896 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
897 pc, address, is_write, *(unsigned long *)old_set);
898 #endif
899 /* XXX: locking issue */
900 if (is_write && page_unprotect(h2g(address), pc, puc)) {
901 return 1;
903 /* see if it is an MMU fault */
904 ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
905 if (ret < 0)
906 return 0; /* not an MMU fault */
907 if (ret == 0)
908 return 1; /* the MMU fault was handled without causing real CPU fault */
909 /* now we have a real cpu fault */
910 tb = tb_find_pc(pc);
911 if (tb) {
912 /* the PC is inside the translated code. It means that we have
913 a virtual CPU fault */
914 cpu_restore_state(tb, env, pc, puc);
916 /* we restore the process signal mask as the sigreturn should
917 do it (XXX: use sigsetjmp) */
918 sigprocmask(SIG_SETMASK, old_set, NULL);
919 cpu_loop_exit();
920 /* never comes here */
921 return 1;
923 #elif defined (TARGET_PPC)
924 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
925 int is_write, sigset_t *old_set,
926 void *puc)
928 TranslationBlock *tb;
929 int ret;
931 if (cpu_single_env)
932 env = cpu_single_env; /* XXX: find a correct solution for multithread */
933 #if defined(DEBUG_SIGNAL)
934 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
935 pc, address, is_write, *(unsigned long *)old_set);
936 #endif
937 /* XXX: locking issue */
938 if (is_write && page_unprotect(h2g(address), pc, puc)) {
939 return 1;
942 /* see if it is an MMU fault */
943 ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
944 if (ret < 0)
945 return 0; /* not an MMU fault */
946 if (ret == 0)
947 return 1; /* the MMU fault was handled without causing real CPU fault */
949 /* now we have a real cpu fault */
950 tb = tb_find_pc(pc);
951 if (tb) {
952 /* the PC is inside the translated code. It means that we have
953 a virtual CPU fault */
954 cpu_restore_state(tb, env, pc, puc);
956 if (ret == 1) {
957 #if 0
958 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
959 env->nip, env->error_code, tb);
960 #endif
961 /* we restore the process signal mask as the sigreturn should
962 do it (XXX: use sigsetjmp) */
963 sigprocmask(SIG_SETMASK, old_set, NULL);
964 do_raise_exception_err(env->exception_index, env->error_code);
965 } else {
966 /* activate soft MMU for this block */
967 cpu_resume_from_signal(env, puc);
969 /* never comes here */
970 return 1;
973 #elif defined(TARGET_M68K)
974 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
975 int is_write, sigset_t *old_set,
976 void *puc)
978 TranslationBlock *tb;
979 int ret;
981 if (cpu_single_env)
982 env = cpu_single_env; /* XXX: find a correct solution for multithread */
983 #if defined(DEBUG_SIGNAL)
984 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
985 pc, address, is_write, *(unsigned long *)old_set);
986 #endif
987 /* XXX: locking issue */
988 if (is_write && page_unprotect(address, pc, puc)) {
989 return 1;
991 /* see if it is an MMU fault */
992 ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
993 if (ret < 0)
994 return 0; /* not an MMU fault */
995 if (ret == 0)
996 return 1; /* the MMU fault was handled without causing real CPU fault */
997 /* now we have a real cpu fault */
998 tb = tb_find_pc(pc);
999 if (tb) {
1000 /* the PC is inside the translated code. It means that we have
1001 a virtual CPU fault */
1002 cpu_restore_state(tb, env, pc, puc);
1004 /* we restore the process signal mask as the sigreturn should
1005 do it (XXX: use sigsetjmp) */
1006 sigprocmask(SIG_SETMASK, old_set, NULL);
1007 cpu_loop_exit();
1008 /* never comes here */
1009 return 1;
1012 #elif defined (TARGET_MIPS)
/* MIPS guest variant: decide whether a host SIGSEGV at guest address
   `address` was caused by emulation (unprotectable page or a guest MMU
   fault) and can be absorbed, or is a genuine host-side crash.
   Returns 1 when handled, 0 when the caller must treat it as fatal.
   NOTE(review): this view of the file has some structural lines
   (braces) dropped by extraction — compare against the pristine file. */
1013 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1014 int is_write, sigset_t *old_set,
1015 void *puc)
1017 TranslationBlock *tb;
1018 int ret;
/* Signal handlers may run without the global `env` register set up;
   fall back to the current CPU (single-threaded assumption, see XXX). */
1020 if (cpu_single_env)
1021 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1022 #if defined(DEBUG_SIGNAL)
1023 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1024 pc, address, is_write, *(unsigned long *)old_set);
1025 #endif
/* A write to a page QEMU write-protected to track self-modifying code:
   unprotect it and retry the faulting access. */
1026 /* XXX: locking issue */
1027 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1028 return 1;
1031 /* see if it is an MMU fault */
1032 ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1033 if (ret < 0)
1034 return 0; /* not an MMU fault */
1035 if (ret == 0)
1036 return 1; /* the MMU fault was handled without causing real CPU fault */
1038 /* now we have a real cpu fault */
1039 tb = tb_find_pc(pc);
1040 if (tb) {
1041 /* the PC is inside the translated code. It means that we have
1042 a virtual CPU fault */
1043 cpu_restore_state(tb, env, pc, puc);
/* ret == 1: deliver the guest exception recorded by the MMU handler;
   otherwise re-enter the block through the soft-MMU slow path. */
1045 if (ret == 1) {
1046 #if 0
1047 printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
1048 env->PC, env->error_code, tb);
1049 #endif
1050 /* we restore the process signal mask as the sigreturn should
1051 do it (XXX: use sigsetjmp) */
1052 sigprocmask(SIG_SETMASK, old_set, NULL);
1053 do_raise_exception_err(env->exception_index, env->error_code);
1054 } else {
1055 /* activate soft MMU for this block */
1056 cpu_resume_from_signal(env, puc);
1058 /* never comes here */
1059 return 1;
1062 #elif defined (TARGET_SH4)
/* SH4 guest variant: same contract as the other handle_cpu_signal
   versions — returns 1 if the SIGSEGV was absorbed by the emulator,
   0 if it is a real host fault. On a genuine guest MMU fault the
   state is restored and we longjmp back via cpu_loop_exit(). */
1063 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1064 int is_write, sigset_t *old_set,
1065 void *puc)
1067 TranslationBlock *tb;
1068 int ret;
1070 if (cpu_single_env)
1071 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1072 #if defined(DEBUG_SIGNAL)
1073 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1074 pc, address, is_write, *(unsigned long *)old_set);
1075 #endif
1076 /* XXX: locking issue */
1077 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1078 return 1;
1081 /* see if it is an MMU fault */
1082 ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1083 if (ret < 0)
1084 return 0; /* not an MMU fault */
1085 if (ret == 0)
1086 return 1; /* the MMU fault was handled without causing real CPU fault */
1088 /* now we have a real cpu fault */
1089 tb = tb_find_pc(pc);
1090 if (tb) {
1091 /* the PC is inside the translated code. It means that we have
1092 a virtual CPU fault */
1093 cpu_restore_state(tb, env, pc, puc);
/* NOTE(review): the disabled printf below mentions NIP/env->nip, a
   PowerPC register — the debug text was copied from the PPC handler
   and never updated for SH4. Harmless while under #if 0. */
1095 #if 0
1096 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1097 env->nip, env->error_code, tb);
1098 #endif
1099 /* we restore the process signal mask as the sigreturn should
1100 do it (XXX: use sigsetjmp) */
1101 sigprocmask(SIG_SETMASK, old_set, NULL);
1102 cpu_loop_exit();
1103 /* never comes here */
1104 return 1;
1107 #elif defined (TARGET_ALPHA)
/* Alpha guest variant: same contract as the other handle_cpu_signal
   versions — returns 1 if the SIGSEGV was absorbed by the emulator,
   0 if it is a real host fault. */
1108 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1109 int is_write, sigset_t *old_set,
1110 void *puc)
1112 TranslationBlock *tb;
1113 int ret;
1115 if (cpu_single_env)
1116 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1117 #if defined(DEBUG_SIGNAL)
1118 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1119 pc, address, is_write, *(unsigned long *)old_set);
1120 #endif
1121 /* XXX: locking issue */
1122 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1123 return 1;
1126 /* see if it is an MMU fault */
1127 ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1128 if (ret < 0)
1129 return 0; /* not an MMU fault */
1130 if (ret == 0)
1131 return 1; /* the MMU fault was handled without causing real CPU fault */
1133 /* now we have a real cpu fault */
1134 tb = tb_find_pc(pc);
1135 if (tb) {
1136 /* the PC is inside the translated code. It means that we have
1137 a virtual CPU fault */
1138 cpu_restore_state(tb, env, pc, puc);
/* NOTE(review): stale debug text — "NIP"/env->nip is PowerPC, copied
   from the PPC handler. Harmless while under #if 0. */
1140 #if 0
1141 printf("PF exception: NIP=0x%08x error=0x%x %p\n",
1142 env->nip, env->error_code, tb);
1143 #endif
1144 /* we restore the process signal mask as the sigreturn should
1145 do it (XXX: use sigsetjmp) */
1146 sigprocmask(SIG_SETMASK, old_set, NULL);
1147 cpu_loop_exit();
1148 /* never comes here */
1149 return 1;
1151 #elif defined (TARGET_CRIS)
/* CRIS guest variant: same contract as the other handle_cpu_signal
   versions — returns 1 if the SIGSEGV was absorbed by the emulator,
   0 if it is a real host fault. */
1152 static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
1153 int is_write, sigset_t *old_set,
1154 void *puc)
1156 TranslationBlock *tb;
1157 int ret;
1159 if (cpu_single_env)
1160 env = cpu_single_env; /* XXX: find a correct solution for multithread */
1161 #if defined(DEBUG_SIGNAL)
1162 printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
1163 pc, address, is_write, *(unsigned long *)old_set);
1164 #endif
1165 /* XXX: locking issue */
1166 if (is_write && page_unprotect(h2g(address), pc, puc)) {
1167 return 1;
1170 /* see if it is an MMU fault */
1171 ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
1172 if (ret < 0)
1173 return 0; /* not an MMU fault */
1174 if (ret == 0)
1175 return 1; /* the MMU fault was handled without causing real CPU fault */
1177 /* now we have a real cpu fault */
1178 tb = tb_find_pc(pc);
1179 if (tb) {
1180 /* the PC is inside the translated code. It means that we have
1181 a virtual CPU fault */
1182 cpu_restore_state(tb, env, pc, puc);
1184 /* we restore the process signal mask as the sigreturn should
1185 do it (XXX: use sigsetjmp) */
1186 sigprocmask(SIG_SETMASK, old_set, NULL);
1187 cpu_loop_exit();
1188 /* never comes here */
1189 return 1;
1192 #else
1193 #error unsupported target CPU
1194 #endif
1196 #if defined(__i386__)
/* Host-side signal entry points: one cpu_signal_handler per host
   architecture. Each extracts the faulting PC and (where the host
   exposes it) the read/write direction from the signal ucontext,
   then defers to the target-specific handle_cpu_signal above. */
1198 #if defined(__APPLE__)
1199 # include <sys/ucontext.h>
/* Darwin stores the machine state behind a pointer (uc_mcontext->ss);
   Linux/glibc exposes it as a gregs[] array indexed by REG_* names. */
1201 # define EIP_sig(context) (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
1202 # define TRAP_sig(context) ((context)->uc_mcontext->es.trapno)
1203 # define ERROR_sig(context) ((context)->uc_mcontext->es.err)
1204 #else
1205 # define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP])
1206 # define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO])
1207 # define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
1208 #endif
1210 int cpu_signal_handler(int host_signum, void *pinfo,
1211 void *puc)
1213 siginfo_t *info = pinfo;
1214 struct ucontext *uc = puc;
1215 unsigned long pc;
1216 int trapno;
1218 #ifndef REG_EIP
1219 /* for glibc 2.1 */
1220 #define REG_EIP EIP
1221 #define REG_ERR ERR
1222 #define REG_TRAPNO TRAPNO
1223 #endif
1224 pc = EIP_sig(uc);
1225 trapno = TRAP_sig(uc);
/* Only a page fault (x86 trap 0xe) carries a meaningful error code;
   bit 1 of that code is the write/read flag. Everything else maps
   to is_write = 0. */
1226 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1227 trapno == 0xe ?
1228 (ERROR_sig(uc) >> 1) & 1 : 0,
1229 &uc->uc_sigmask, puc);
1232 #elif defined(__x86_64__)
/* x86-64 host: same scheme as i386 — RIP from the ucontext, and the
   write flag from bit 1 of the page-fault (trap 0xe) error code. */
1234 int cpu_signal_handler(int host_signum, void *pinfo,
1235 void *puc)
1237 siginfo_t *info = pinfo;
1238 struct ucontext *uc = puc;
1239 unsigned long pc;
1241 pc = uc->uc_mcontext.gregs[REG_RIP];
1242 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1243 uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
1244 (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
1245 &uc->uc_sigmask, puc);
1248 #elif defined(__powerpc__)
1250 /***********************************************************************
1251 * signal context platform-specific definitions
1252 * From Wine
1254 #ifdef linux
1255 /* All Registers access - only for local access */
1256 # define REG_sig(reg_name, context) ((context)->uc_mcontext.regs->reg_name)
1257 /* Gpr Registers access */
1258 # define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context)
1259 # define IAR_sig(context) REG_sig(nip, context) /* Program counter */
1260 # define MSR_sig(context) REG_sig(msr, context) /* Machine State Register (Supervisor) */
1261 # define CTR_sig(context) REG_sig(ctr, context) /* Count register */
1262 # define XER_sig(context) REG_sig(xer, context) /* User's integer exception register */
1263 # define LR_sig(context) REG_sig(link, context) /* Link register */
1264 # define CR_sig(context) REG_sig(ccr, context) /* Condition register */
1265 /* Float Registers access */
/* Float/FPSCR live past the 48 GPR-sized slots in the kernel's
   pt_regs layout — hence the raw byte-offset arithmetic below. */
1266 # define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1267 # define FPSCR_sig(context) (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1268 /* Exception Registers access */
1269 # define DAR_sig(context) REG_sig(dar, context)
1270 # define DSISR_sig(context) REG_sig(dsisr, context)
1271 # define TRAP_sig(context) REG_sig(trap, context)
1272 #endif /* linux */
1274 #ifdef __APPLE__
1275 # include <sys/ucontext.h>
1276 typedef struct ucontext SIGCONTEXT;
1277 /* All Registers access - only for local access */
1278 # define REG_sig(reg_name, context) ((context)->uc_mcontext->ss.reg_name)
1279 # define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
1280 # define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
1281 # define VECREG_sig(reg_name, context) ((context)->uc_mcontext->vs.reg_name)
1282 /* Gpr Registers access */
1283 # define GPR_sig(reg_num, context) REG_sig(r##reg_num, context)
1284 # define IAR_sig(context) REG_sig(srr0, context) /* Program counter */
1285 # define MSR_sig(context) REG_sig(srr1, context) /* Machine State Register (Supervisor) */
1286 # define CTR_sig(context) REG_sig(ctr, context)
/* NOTE(review): the two trailing comments below are swapped relative
   to the Linux block above (XER is the integer exception register,
   LR the link register) — comment-only defect inherited from Wine. */
1287 # define XER_sig(context) REG_sig(xer, context) /* Link register */
1288 # define LR_sig(context) REG_sig(lr, context) /* User's integer exception register */
1289 # define CR_sig(context) REG_sig(cr, context) /* Condition register */
1290 /* Float Registers access */
1291 # define FLOAT_sig(reg_num, context) FLOATREG_sig(fpregs[reg_num], context)
1292 # define FPSCR_sig(context) ((double)FLOATREG_sig(fpscr, context))
1293 /* Exception Registers access */
1294 # define DAR_sig(context) EXCEPREG_sig(dar, context) /* Fault registers for coredump */
1295 # define DSISR_sig(context) EXCEPREG_sig(dsisr, context)
1296 # define TRAP_sig(context) EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
1297 #endif /* __APPLE__ */
/* PowerPC host: PC from IAR; the store/load direction comes from the
   DSISR register (bit 0x02000000 = store), except for instruction
   faults (trap 0x400) where DSISR is not meaningful. */
1299 int cpu_signal_handler(int host_signum, void *pinfo,
1300 void *puc)
1302 siginfo_t *info = pinfo;
1303 struct ucontext *uc = puc;
1304 unsigned long pc;
1305 int is_write;
1307 pc = IAR_sig(uc);
1308 is_write = 0;
1309 #if 0
1310 /* ppc 4xx case */
1311 if (DSISR_sig(uc) & 0x00800000)
1312 is_write = 1;
1313 #else
1314 if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1315 is_write = 1;
1316 #endif
1317 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1318 is_write, &uc->uc_sigmask, puc);
1321 #elif defined(__alpha__)
/* Alpha host: the kernel does not report the access direction, so we
   fetch the faulting instruction and decode its opcode (bits 31:26)
   against the store-instruction encodings to derive is_write. */
1323 int cpu_signal_handler(int host_signum, void *pinfo,
1324 void *puc)
1326 siginfo_t *info = pinfo;
1327 struct ucontext *uc = puc;
1328 uint32_t *pc = uc->uc_mcontext.sc_pc;
1329 uint32_t insn = *pc;
1330 int is_write = 0;
1332 /* XXX: need kernel patch to get write flag faster */
1333 switch (insn >> 26) {
1334 case 0x0d: // stw
1335 case 0x0e: // stb
1336 case 0x0f: // stq_u
1337 case 0x24: // stf
1338 case 0x25: // stg
1339 case 0x26: // sts
1340 case 0x27: // stt
1341 case 0x2c: // stl
1342 case 0x2d: // stq
1343 case 0x2e: // stl_c
1344 case 0x2f: // stq_c
1345 is_write = 1;
1348 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1349 is_write, &uc->uc_sigmask, puc);
1351 #elif defined(__sparc__)
/* SPARC host: like Alpha, the write flag is recovered by decoding the
   faulting instruction (format-3 loads/stores, op field == 3, with
   op3 in bits 24:19 selecting the store variants).
   The ucontext layout differs per ABI: 32-bit/Solaris passes raw
   registers after siginfo; 64-bit Linux and OpenBSD use sigcontext. */
1353 int cpu_signal_handler(int host_signum, void *pinfo,
1354 void *puc)
1356 siginfo_t *info = pinfo;
1357 int is_write;
1358 uint32_t insn;
1359 #if !defined(__arch64__) || defined(HOST_SOLARIS)
1360 uint32_t *regs = (uint32_t *)(info + 1)
1361 void *sigmask = (regs + 20);
1362 /* XXX: is there a standard glibc define ? */
1363 unsigned long pc = regs[1];
1364 #else
1365 #ifdef __linux__
1366 struct sigcontext *sc = puc;
1367 unsigned long pc = sc->sigc_regs.tpc;
1368 void *sigmask = (void *)sc->sigc_mask;
1369 #elif defined(__OpenBSD__)
1370 struct sigcontext *uc = puc;
1371 unsigned long pc = uc->sc_pc;
1372 void *sigmask = (void *)(long)uc->sc_mask;
1373 #endif
1374 #endif
1376 /* XXX: need kernel patch to get write flag faster */
1377 is_write = 0;
1378 insn = *(uint32_t *)pc;
1379 if ((insn >> 30) == 3) {
1380 switch((insn >> 19) & 0x3f) {
1381 case 0x05: // stb
1382 case 0x06: // sth
1383 case 0x04: // st
1384 case 0x07: // std
1385 case 0x24: // stf
1386 case 0x27: // stdf
1387 case 0x25: // stfsr
1388 is_write = 1;
1389 break;
1392 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1393 is_write, sigmask, NULL);
1396 #elif defined(__arm__)
/* ARM host: PC from the ucontext (field name changed between glibc
   versions, hence the version check). The access direction is not
   recovered here — is_write is hardwired to 0 (see XXX). */
1398 int cpu_signal_handler(int host_signum, void *pinfo,
1399 void *puc)
1401 siginfo_t *info = pinfo;
1402 struct ucontext *uc = puc;
1403 unsigned long pc;
1404 int is_write;
1406 #if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
1407 pc = uc->uc_mcontext.gregs[R15];
1408 #else
1409 pc = uc->uc_mcontext.arm_pc;
1410 #endif
1411 /* XXX: compute is_write */
1412 is_write = 0;
1413 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1414 is_write,
1415 &uc->uc_sigmask, puc);
1418 #elif defined(__mc68000)
/* m68k host: PC from gregs[16]; access direction not recovered
   (is_write hardwired to 0, see XXX). */
1420 int cpu_signal_handler(int host_signum, void *pinfo,
1421 void *puc)
1423 siginfo_t *info = pinfo;
1424 struct ucontext *uc = puc;
1425 unsigned long pc;
1426 int is_write;
1428 pc = uc->uc_mcontext.gregs[16];
1429 /* XXX: compute is_write */
1430 is_write = 0;
1431 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1432 is_write,
1433 &uc->uc_sigmask, puc);
1436 #elif defined(__ia64)
1438 #ifndef __ISR_VALID
1439 /* This ought to be in <bits/siginfo.h>... */
1440 # define __ISR_VALID 1
1441 #endif
/* IA-64 host: PC from sc_ip. For the fault-class signals the kernel
   supplies an Interrupt Status Register in siginfo; when valid, its
   bit 33 (ISR.W) gives the write flag directly. */
1443 int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
1445 siginfo_t *info = pinfo;
1446 struct ucontext *uc = puc;
1447 unsigned long ip;
1448 int is_write = 0;
1450 ip = uc->uc_mcontext.sc_ip;
1451 switch (host_signum) {
1452 case SIGILL:
1453 case SIGFPE:
1454 case SIGSEGV:
1455 case SIGBUS:
1456 case SIGTRAP:
1457 if (info->si_code && (info->si_segvflags & __ISR_VALID))
1458 /* ISR.W (write-access) is bit 33: */
1459 is_write = (info->si_isr >> 33) & 1;
1460 break;
1462 default:
1463 break;
1465 return handle_cpu_signal(ip, (unsigned long)info->si_addr,
1466 is_write,
1467 &uc->uc_sigmask, puc);
1470 #elif defined(__s390__)
/* s390 host: PC from the PSW address; access direction not recovered
   (is_write hardwired to 0, see XXX). */
1472 int cpu_signal_handler(int host_signum, void *pinfo,
1473 void *puc)
1475 siginfo_t *info = pinfo;
1476 struct ucontext *uc = puc;
1477 unsigned long pc;
1478 int is_write;
1480 pc = uc->uc_mcontext.psw.addr;
1481 /* XXX: compute is_write */
1482 is_write = 0;
1483 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1484 is_write, &uc->uc_sigmask, puc);
1487 #elif defined(__mips__)
/* MIPS host: PC from uc_mcontext.pc; access direction not recovered
   (is_write hardwired to 0, see XXX). */
1489 int cpu_signal_handler(int host_signum, void *pinfo,
1490 void *puc)
1492 siginfo_t *info = pinfo;
1493 struct ucontext *uc = puc;
1494 greg_t pc = uc->uc_mcontext.pc;
1495 int is_write;
1497 /* XXX: compute is_write */
1498 is_write = 0;
1499 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1500 is_write, &uc->uc_sigmask, puc);
1503 #elif defined(__hppa__)
/* PA-RISC host: PC from the front element of the instruction-address
   offset queue (sc_iaoq[0]); access direction not recovered
   (is_write hardwired to 0, see FIXME). */
1505 int cpu_signal_handler(int host_signum, void *pinfo,
1506 void *puc)
1508 struct siginfo *info = pinfo;
1509 struct ucontext *uc = puc;
1510 unsigned long pc;
1511 int is_write;
1513 pc = uc->uc_mcontext.sc_iaoq[0];
1514 /* FIXME: compute is_write */
1515 is_write = 0;
1516 return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1517 is_write,
1518 &uc->uc_sigmask, puc);
1521 #else
1523 #error host CPU specific signal handler needed
1525 #endif
1527 #endif /* !defined(CONFIG_SOFTMMU) */