/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif
#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL
int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand at this point because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}
/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
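/* Note on TB lookup (tb_find_fast/tb_find_slow below): the fast path
   consults the per-CPU virtual-PC hash table env->tb_jmp_cache; on a
   miss, the slow path walks the global tb_phys_hash chain, keyed by
   the physical PC so that translations survive changes in the virtual
   mapping, and retranslates via tb_gen_code() only when no existing
   block matches. */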
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}
static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}
/* main execution loop */
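/* In outline: the outer for(;;) below re-enters via
   setjmp(env->jmp_env), which is the landing point for
   cpu_loop_exit()/longjmp() from translated code and helpers; each
   pass first delivers any pending exception, then an inner for(;;)
   services interrupt requests, looks up the next TB and runs it. */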
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
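            /* With KVM enabled the TCG loop below is bypassed:
               kvm_cpu_exec() runs the guest inside the hypervisor and
               the longjmp re-enters the setjmp above so that pending
               exceptions and exit requests are re-examined. */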
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }
            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
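                /* next_tb carries the previous TB pointer in its upper
                   bits and the outgoing jump slot (0 or 1) in its low
                   two bits; tb_add_jump() patches that slot to branch
                   straight to the new TB, so chained blocks execute
                   without returning to this loop. */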
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb.  Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
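                    /* tcg_qemu_tb_exec() returns the address of the
                       last executed TB with status bits in the low two
                       bits; the value 2 there means the instruction
                       counter (icount) expired mid-block rather than a
                       normal chained exit. */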
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */
#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}
/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot be enabled yet: it causes an MMU exception where
       NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}
#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */
#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
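/* The per-target variants below all follow the same contract: return 1
   when the fault was consumed (the page was unprotected or a guest
   exception was raised and execution can resume), and 0 when it is not
   an MMU fault and must be treated as a real host fault. */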
#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)     (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif
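/* On x86 hosts, trap number 0xe is a page fault and bit 1 of the
   page-fault error code is the write bit, which is how is_write is
   derived below. */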
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)    ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)     REG_sig(gpr[reg_num], context)
# define IAR_sig(context)              REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)              REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)              REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)              REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)               REG_sig(link, context) /* Link register */
# define CR_sig(context)               REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)   (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)            (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)              REG_sig(dar, context)
# define DSISR_sig(context)            REG_sig(dsisr, context)
# define TRAP_sig(context)             REG_sig(trap, context)
#endif /* linux */
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)        ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)   ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)   ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)     ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)         REG_sig(r##reg_num, context)
# define IAR_sig(context)                  REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                  REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                  REG_sig(ctr, context)
# define XER_sig(context)                  REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                   REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                   REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)       FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)                ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                  EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)                EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)                 EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */