R_PPC_REL24 safety net
[qemu-kvm/fedora.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
#if !(defined(TARGET_SPARC) || defined(TARGET_SH4) || defined(TARGET_M68K))
#define reg_T2
#endif
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator.
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    struct ucontext *uc = puc;
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
    }
#endif
    longjmp(env->jmp_env, 1);
}
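
/* Translated-block lookup is two-level: tb_find_fast() further below first
   probes a small per-CPU cache hashed on the virtual PC; on a miss it falls
   back to tb_find_slow(), which walks the physical-address hash chain and,
   if no block matches, translates the code at 'pc' now. */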
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    int code_gen_size;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
    uint8_t *tc_ptr;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* don't forget to invalidate previous TB info */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
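
/* The lookup key for a translated block is (pc, cs_base, flags): any piece
   of CPU state that affects code generation must be folded into 'flags'
   below so that a stale block is never reused. */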
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->PC[env->current_tc];
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = env->flags;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (__builtin_expect(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                         tb->flags != flags, 0)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
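
/* cpu_exec() is structured as two nested loops: the outer for(;;) holds the
   setjmp() context that pending exceptions longjmp() back to, while the
   inner for(;;) services interrupt_request, finds the next TB and runs it. */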
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
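    /* exception_index < 0 means no exception is pending; indices at or
       above EXCP_INTERRUPT are exit requests rather than guest faults. */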

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
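
            /* next_tb caches the previously executed TB so consecutive
               blocks can be chained; setting it to 0 forces a fresh lookup
               and suppresses direct-jump patching for the next block. */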
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (__builtin_expect(interrupt_request, 0) &&
                    likely(!(env->singlestep_enabled & SSTEP_NOIRQ))) {
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
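                /* The low 2 bits of next_tb select which of the previous
                   TB's two jump slots gets patched to point at 'tb'. */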
                {
                    if (next_tb != 0 &&
#ifdef USE_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                tc_ptr = tb->tc_ptr;
                env->current_tb = tb;
                /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                env = cpu_single_env;
#define env cpu_single_env
#endif
                next_tb = tcg_qemu_tb_exec(tc_ptr);
                env->current_tb = NULL;
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
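
/* Each target below provides a handle_cpu_signal() variant with the same
   protocol: return 1 when the fault was handled here (the page was
   unprotected again or a guest exception was raised), and 0 to let the
   host OS deal with the signal. */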
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#else
#error unsupported target CPU
#endif
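
/* The host-side cpu_signal_handler() variants below extract the faulting
   PC and, where the host reports it, a write flag from the signal context,
   then forward everything to handle_cpu_signal() above. */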
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
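    /* trap 0xe is the x86 page fault; bit 1 of the page-fault error code
       is set when the faulting access was a write. */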
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}
#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context)  /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
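    /* TRAP 0x400 is an instruction storage interrupt (never a store);
       for data faults, DSISR bit 0x02000000 is set when the faulting
       access was a write. */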
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
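    /* the Alpha major opcode lives in bits 31:26; every case below is a
       store instruction, so a match means the fault was a write. */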
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
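    /* instructions with op (bits 31:30) == 3 are SPARC loads/stores; the
       op3 field (bits 24:19) identifies the store opcodes matched below. */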
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */