/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
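/* cpu_exec_nocache() is reached only from the instruction-count
   (icount) handling in cpu_exec() below: when fewer guest instructions
   remain in the budget than the cached TB would execute, a one-shot TB
   limited to the remaining count is generated, run once, and freed.  */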
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
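/* tb_find_slow() implements a two-level lookup: tb_phys_hash is keyed
   on the *physical* PC, so translated blocks survive changes to the
   virtual mapping, while tb_jmp_cache is a fast per-virtual-PC front
   end that tb_find_fast() probes first.  A TB spanning two pages must
   also match on the physical address of the second page.  */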
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
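/* A TB is identified by the triple (pc, cs_base, flags): the flags
   word folds in every piece of CPU state (privilege level, FPU
   enables, delay-slot state, ...) that affects code generation, so two
   blocks at the same PC translated under different modes are never
   confused with each other.  */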
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
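                    /* On v7-M the magic values are the EXC_RETURN
                       encodings, all of which lie at 0xfffffff0 and
                       above, hence the PC range check below.  */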
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
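                /* tcg_qemu_tb_exec() returns the address of the TB that
                   was running when the loop was exited, with the index
                   of the outgoing jump slot in the low two bits (the
                   value 2 there is reserved to signal icount expiry,
                   handled further down).  A non-zero next_tb therefore
                   names the block and slot to patch so the two TBs
                   chain directly.  */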
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
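                    /* The icount budget lives in a 16-bit down-counter
                       (icount_decr.u16.low) decremented by generated
                       code; any budget beyond 0xffff is parked in
                       icount_extra and refilled in chunks, so the hot
                       path only ever touches the 16-bit field.  */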
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it can raise an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
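/* Return value contract (shared by every target variant below): 0
   means the fault was not caused by guest memory access and should be
   handled by the host; 1 means it was resolved here, either by
   re-enabling a page that had been write-protected for TB invalidation
   or by raising the corresponding guest exception and longjmp-ing back
   into cpu_exec().  */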
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
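    /* Trap 0xe is the x86 page-fault vector; bit 1 of the page-fault
       error code is the W bit, set when the faulting access was a
       write.  For any other trap, report a read.  */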
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
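    /* Bits 31:26 of an Alpha instruction hold the opcode; the faulting
       access was a write iff the opcode is one of the store forms
       listed below.  */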
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
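    /* SPARC memory instructions have op (bits 31:30) == 3, with the
       op3 field in bits 24:19 selecting the operation; the op3 values
       matched below are the store forms.  */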
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */