/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

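/* Note: the value returned by tcg_qemu_tb_exec() is the address of the
   last executed TB with status encoded in its two low bits; the check
   for (next_tb & 3) == 2 above corresponds to the "instruction counter
   expired" case also handled in the main loop below. */
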
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

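/* Note: a TB whose code crosses a page boundary records both physical
   pages in page_addr[]; the second-page check above makes sure a cached
   TB is only reused while the current mapping of the following virtual
   page still matches the code it was translated from. */
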
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit 13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

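/* Note: tb_jmp_cache is a direct-mapped, virtually indexed first-level
   cache in front of the physical tb_phys_hash walked by tb_find_slow();
   (pc, cs_base, flags) must all match because together they determine
   how the bytes at pc were translated. */
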
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

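    /* Exceptions raised while translated code runs come back here through
       cpu_loop_exit()/longjmp(), which is why the host registers were saved
       above and why env state must be synchronized before the jump. */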
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
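            /* At this point any pending exception has been delivered to the
               guest (or has terminated the loop with ret set), so the code
               below can assume exception_index < 0. */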
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
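            /* kqemu_cpu_exec() results above: 1 means the guest raised an
               exception, 2 means execution must continue under the software
               MMU, and any other value restarts the loop unless a hardware
               interrupt is pending. */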

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
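                /* Each interrupt accepted above either leaves the loop
                   through cpu_loop_exit() or clears next_tb, so the next
                   TB is reached through a fresh lookup instead of a
                   chained jump that no longer matches the new flow. */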
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
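                /* tb_add_jump() above patches the direct-jump slot
                   (next_tb & 3) at the end of the previously executed TB
                   (next_tb & ~3) so that later executions flow straight
                   into this TB without repeating the lookup. */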
                env->current_tb = tb;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
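                /* Note: the icount decrementer is only 16 bits wide, so a
                   large instruction budget is paid out in slices of at most
                   0xffff instructions from icount_extra; once the budget is
                   gone, any remainder runs uncached via cpu_exec_nocache(). */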
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code, as
   an exception can be generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

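/* Return protocol shared by all the handle_cpu_signal() variants below:
   0 means the fault was not caused by a guest memory access and the host
   signal should be handled normally; 1 means it was consumed here.  The
   call may also never return, when the fault is turned into a guest
   exception via cpu_loop_exit() or a longjmp back into cpu_exec(). */
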
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

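/* On x86 hosts, trap number 0xe is a page fault; bit 1 of the page-fault
   error code is set when the faulting access was a write, which is what
   the (ERROR_sig(uc) >> 1) & 1 expression above extracts.  The x86_64
   variant below applies the same decoding. */
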
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)   ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)    REG_sig(gpr[reg_num], context)
# define IAR_sig(context)             REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)             REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)             REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)             REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)              REG_sig(link, context) /* Link register */
# define CR_sig(context)              REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)  (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)           (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)             REG_sig(dar, context)
# define DSISR_sig(context)           REG_sig(dsisr, context)
# define TRAP_sig(context)            REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
/* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */