/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
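/* Note: cpu_loop_exit() unwinds straight back to the setjmp(env->jmp_env)
   point in cpu_exec() below, so it may only be called while cpu_exec()
   is active on the current stack. */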
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
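/* TB lookup is two-level: tb_find_fast() probes the virtually indexed
   tb_jmp_cache first; on a miss, tb_find_slow() walks a hash chain keyed
   on the *physical* PC, so a translation is never reused across a change
   of guest address-space mapping. cs_base and flags take part in the
   match because the same guest PC can need different translations
   (e.g. in different CPU modes). */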
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
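    /* While translated code runs, the arithmetic flags stay out of
       eflags: CC_SRC/CC_OP describe how to recompute them lazily, and DF
       (eflags bit 10) is kept as +1/-1 so string instructions can add it
       directly to their index registers. */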
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
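            /* next_tb carries the address of the previously executed TB
               in its upper bits plus a 2-bit tag: the jump-slot index
               used for direct chaining below, or 2 when translated code
               stopped because the instruction counter expired. 0 means
               "no previous TB", so no chaining is attempted. */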
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
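                /* tb_add_jump() patches jump slot (next_tb & 3) at the
                   end of the previous TB so that it branches straight to
                   this one, letting later executions bypass this lookup
                   loop entirely. */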
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
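                    /* With icount, translated code decrements
                       icount_decr.u16.low as it executes and returns
                       early with tag 2 once it reaches zero;
                       icount_extra holds whatever part of the budget did
                       not fit into the 16-bit decrementer. */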
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
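/* The return value convention, shared by all handle_cpu_signal()
   variants below: 1 means the fault was handled (the page was
   unprotected or a guest exception was raised), 0 means it was not an
   MMU fault and must be forwarded to the application's own handler. */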
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
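    /* Trap 0xe is the x86 page-fault exception; bit 1 of its error code
       is set when the faulting access was a write, which is exactly what
       handle_cpu_signal() wants for is_write. */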
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)  ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)   REG_sig(gpr[reg_num], context)
# define IAR_sig(context)            REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)            REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)            REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)            REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)             REG_sig(link, context) /* Link register */
# define CR_sig(context)             REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context) (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)          (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)            REG_sig(dar, context)
# define DSISR_sig(context)          REG_sig(dsisr, context)
# define TRAP_sig(context)           REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)       /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
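    /* DSISR bit 0x02000000 is set when the faulting data access was a
       store; TRAP_sig is compared against 0x400 (the instruction storage
       interrupt) so that instruction-fetch faults are never treated as
       writes. */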
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;
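    /* An Alpha instruction keeps its major opcode in bits 31:26, so the
       faulting instruction is decoded directly; the cases below are the
       integer and floating-point store opcodes. */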
    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }
    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
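    /* SPARC format-3 (memory) instructions have op == 3 in bits 31:30
       and the real opcode (op3) in bits 24:19; the cases below cover the
       store, store-alternate and atomic compare-and-swap forms. */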
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */