/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
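
/* On sparc hosts "env" is normally kept in a fixed global host register
   reserved by TCG; aliasing it to the cpu_single_env variable forces
   accesses to go through memory instead (an assumption about the
   rationale, inferred from the workaround comment above). */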

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
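
/* Note: tcg_qemu_tb_exec() returns the address of the last executed TB
   with a tag packed in its two low bits; a tag of 2 (checked above and
   in the main loop below) means the block was exited early, so the PC
   has to be recovered from the TB itself. */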

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
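
/* tb_find_fast() first probes the direct-mapped tb_jmp_cache indexed
   by the virtual PC and only falls back to the physical-address hash
   search in tb_find_slow() on a miss, or when cs_base/flags differ. */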

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
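
/* cpu_exec() below brackets the whole loop with the DECLARE_HOST_REGS /
   SAVE_HOST_REGS inclusions of hostregs_helper.h, which save and later
   restore the host registers that TCG reserves globally, so generated
   code may clobber them (a sketch of the mechanism; see
   hostregs_helper.h for the details). */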

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
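
            /* With KVM enabled the guest executes inside the kernel
               module rather than through TCG; kvm_cpu_exec() returns
               once an exit needs handling here, and the longjmp()
               re-enters the setjmp loop above. */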
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
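                /* tb_add_jump() below patches the jump slot (next_tb & 3)
                   of the previously executed TB (next_tb & ~3) so that
                   chained blocks branch to each other directly, without
                   returning to this loop in between. */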
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
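                    /* A low-bit tag of 2 in next_tb means the
                       instruction-count decrementer expired mid-TB:
                       icount_decr counts down in its low 16 bits while
                       icount_extra holds the rest of the budget, so at
                       most 0xffff instructions run between refills. */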
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)
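
/* The handle_cpu_signal() variants below return 1 when the fault was
   handled (page unprotected, MMU fault resolved, or a guest exception
   raised) and 0 when it was not an MMU fault and must be treated as a
   real host fault. */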

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
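
/* Host-side signal handlers: each variant extracts the faulting PC
   (and, where the host context exposes one, a write flag) from the
   signal context and forwards the fault to handle_cpu_signal() above. */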

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(r##reg_num, context)
# define IAR_sig(context)              REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)              REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(lr, context)   /* Link register */
# define CR_sig(context)               REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)            ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)              EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)            EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)             EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */