[qemu/navara.git] / cpu-exec.c
/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    longjmp(env->jmp_env, 1);
}
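
/* Note: cpu_loop_exit() and cpu_resume_from_signal() never return to
   their caller: both longjmp() back to the setjmp() point established
   in cpu_exec() below. */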

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    uintptr_t next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
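
/* The value returned by tcg_qemu_tb_exec() is the address of the last
   translated block, with status encoded in its two low bits; the
   (next_tb & 3) == 2 case tested above marks a block that was stopped
   before it started executing, so only the guest PC needs restoring. */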

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
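
/* TB lookup is two-level: tb_find_fast() below probes the per-CPU
   virtual-PC hash (env->tb_jmp_cache) first and falls back to
   tb_find_slow() above, which walks the physically indexed hash chain
   and translates the code on a complete miss. */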

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
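
/* Typical usage is to chain handlers: save the previous handler and call
   it from your own (hypothetical sketch, not a caller that exists in
   this file):

       static CPUDebugExcpHandler *prev_handler;

       static void my_debug_handler(CPUState *env)
       {
           // ... inspect env->watchpoint_hit or env->exception_index ...
           if (prev_handler)
               prev_handler(env);
       }

       // during setup:
       prev_handler = cpu_set_debug_excp_handler(my_debug_handler);
*/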

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    uintptr_t next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;
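
    /* The target-specific ladder below moves architectural flag state
       into QEMU's lazy condition-code representation (e.g. CC_SRC/CC_OP
       on i386, cc_dest/cc_x on m68k), so translated code can defer
       computing flags until one is actually consumed. */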
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
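
            /* next_tb carries the address of the previously executed TB,
               with the index of the jump slot to patch encoded in its
               two low bits (see the tb_add_jump() call below); forcing
               it to 0 prevents the next block from being chained. */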
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_interrupts_enabled(env)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08" PRIxPTR " [" TARGET_FMT_lx "] %s\n",
                              (intptr_t)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(intptr_t)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
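
                /* Note: the icount decrementer is only 16 bits wide
                   (icount_decr.u16.low), so the budget in icount_extra is
                   fed to it in slices of at most 0xffff instructions; when
                   the last slice expires, the leftover instructions are
                   run uncached through cpu_exec_nocache() above. */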
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
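
/* The user-mode-only helpers below share one pattern: temporarily point
   the global 'env' at the caller-supplied CPUX86State, call the TCG
   helper (which implicitly operates on 'env'), then restore the saved
   pointer. */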
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
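
/* The handlers below for the other targets mirror the i386 version
   above: first try page_unprotect() in case the fault was a write to a
   page holding translated code, then let the target MMU code resolve
   the fault, and only for a remaining real guest fault restore the CPU
   state from the faulting TB and deliver the guest exception. */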

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MICROBLAZE)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(uintptr_t pc, uintptr_t address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((uintptr_t*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif
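
/* On x86 hosts, trap number 0xe is a page fault and bit 1 of the
   page-fault error code is set when the faulting access was a write;
   the (ERROR_sig(uc) >> 1) & 1 expression below extracts that bit. */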

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    uintptr_t pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    uintptr_t pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)               REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context) /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)         ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)    ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)    ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)      ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)    /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((uintptr_t)pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    uintptr_t pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    uintptr_t pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    uintptr_t pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    uintptr_t pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (uintptr_t)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */