The attached patch fixes a series of occurrences of this warning.
[qemu/mini2440.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

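/* Set when existing translations may have been invalidated (e.g. by a
   translation-buffer flush) while a new TB was being generated; cpu_exec()
   checks it so that stale jump-cache and chaining state is not reused. */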
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        CPU_PC_FROM_TB(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

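/* Look the current (pc, cs_base, flags) tuple up in the physical-address
   hash table of translated blocks; if no matching translation exists,
   generate one.  The result is also written to the virtual-PC jump cache
   that tb_find_fast() probes first. */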
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

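/* Fast TB lookup: compute the (pc, cs_base, flags) tuple that identifies a
   translation for the current CPU state, probe the per-CPU virtual-PC jump
   cache with it, and fall back to tb_find_slow() on a miss. */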
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint64_t flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
#if defined(TARGET_I386)
    flags = env->hflags;
    flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    cs_base = env->segs[R_CS].base;
    pc = cs_base + env->eip;
#elif defined(TARGET_ARM)
    flags = env->thumb | (env->vfp.vec_len << 1)
            | (env->vfp.vec_stride << 4);
    if ((env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR)
        flags |= (1 << 6);
    if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30))
        flags |= (1 << 7);
    flags |= (env->condexec_bits << 8);
    cs_base = 0;
    pc = env->regs[15];
#elif defined(TARGET_SPARC)
#ifdef TARGET_SPARC64
    // AM . Combined FPU enable bits . PRIV . DMMU enabled . IMMU enabled
    flags = ((env->pstate & PS_AM) << 2)
        | (((env->pstate & PS_PEF) >> 1) | ((env->fprs & FPRS_FEF) << 2))
        | (env->pstate & PS_PRIV) | ((env->lsu & (DMMU_E | IMMU_E)) >> 2);
#else
    // FPU enable . Supervisor
    flags = (env->psref << 4) | env->psrs;
#endif
    cs_base = env->npc;
    pc = env->pc;
#elif defined(TARGET_PPC)
    flags = env->hflags;
    cs_base = 0;
    pc = env->nip;
#elif defined(TARGET_MIPS)
    flags = env->hflags & (MIPS_HFLAG_TMASK | MIPS_HFLAG_BMASK);
    cs_base = 0;
    pc = env->active_tc.PC;
#elif defined(TARGET_M68K)
    flags = (env->fpcr & M68K_FPCR_PREC)  /* Bit  6 */
            | (env->sr & SR_S)            /* Bit  13 */
            | ((env->macsr >> 4) & 0xf);  /* Bits 0-3 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_SH4)
    flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                    | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))   /* Bits  0- 3 */
            | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))  /* Bits 19-21 */
            | (env->sr & (SR_MD | SR_RB));                     /* Bits 29-30 */
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_ALPHA)
    flags = env->ps;
    cs_base = 0;
    pc = env->pc;
#elif defined(TARGET_CRIS)
    flags = env->pregs[PR_CCS] & (S_FLAG | P_FLAG | U_FLAG | X_FLAG);
    flags |= env->dslot;
    cs_base = 0;
    pc = env->pc;
#else
#error unsupported CPU
#endif
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
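    /* hostregs_helper.h is included three times in this function: here to
       declare storage for the host registers that hold CPU globals, below
       (with SAVE_HOST_REGS) to save them, and once more at the end to
       restore them before returning. */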
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

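    /* The outer for(;;) loop below is re-entered via longjmp(env->jmp_env)
       whenever an exception or an exit request interrupts translated code;
       the inner for(;;) loop services pending interrupts, then finds and
       executes one translation block at a time. */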
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    break;
                } else if (env->user_mode_only) {
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
                } else {
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                }
                env->exception_index = -1;
            }
#ifdef USE_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0) {
                int ret;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
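            /* A non-zero next_tb holds the address of the TB that just ran,
               with the index of the jump slot to patch in its two low bits;
               tb_add_jump() uses it below to chain that TB directly to the
               next one.  It is reset to 0 whenever the program flow may have
               changed, so that no stale jump gets patched in. */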
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            if (loglevel & CPU_LOG_TB_IN_ASM) {
                                fprintf(logfile, "Servicing hardware INT=0x%02x\n", intno);
                            }
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            if (loglevel & CPU_LOG_TB_IN_ASM)
                                fprintf(logfile, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_EXIT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
                        env->exception_index = EXCP_INTERRUPT;
                        cpu_loop_exit();
                    }
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                    cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SPARC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_PPC)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_MIPS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_SH4)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_ALPHA)
                    cpu_dump_state(env, logfile, fprintf, 0);
#elif defined(TARGET_CRIS)
                    cpu_dump_state(env, logfile, fprintf, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
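                /* tb_lock protects both the TB lookup below and the patching
                   of direct jumps between blocks (tb_add_jump). */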
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                if ((loglevel & CPU_LOG_EXEC)) {
                    fprintf(logfile, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                            (long)tb->tc_ptr, tb->pc,
                            lookup_symbol(tb->pc));
                }
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef USE_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->interrupt_request & CPU_INTERRUPT_EXIT))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        CPU_PC_FROM_TB(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(USE_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

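/* The wrappers below temporarily point the global 'env' at the caller's
   CPUX86State so that the target helpers, which operate on that global,
   can be called from user-mode code outside cpu_exec(). */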
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
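/* Returns 1 when the fault was handled here (the page was unprotected or
   the guest MMU handled it; a real guest fault raises the exception and
   does not return), and 0 when the access was not a fault of the emulated
   CPU and must be dealt with by the caller. */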
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        do_raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

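/* The host-side cpu_signal_handler() variants below extract the faulting
   host PC and, where the host signal context exposes it, whether the access
   was a write, then hand the fault to handle_cpu_signal() above. */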
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__x86_64__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;

    pc = uc->uc_mcontext.gregs[REG_RIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask, puc);
}

#elif defined(__powerpc__)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */