/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#if !defined(TARGET_IA64)
#include "tcg.h"
#endif
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#include "qemu-kvm.h"

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif
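
/* Set by tb_gen_code() when the translation cache had to be flushed while
   a block was being generated; any cached TB pointers (including the
   chaining value kept in next_tb inside cpu_exec()) may then be stale and
   must be dropped. */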
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}
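
/* A note on the value returned by tcg_qemu_tb_exec(): it is the address of
   the last executed TB, with its low two bits used as a status code.
   Values 0 and 1 name the jump slot through which that TB was left (used
   in cpu_exec() to chain it to its successor); 2 means execution stopped
   before or inside the block, e.g. because the instruction counter
   expired, so the guest PC must be recovered from the TB itself. */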
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}
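
/* TB lookup is two-level: tb_find_fast() probes a small cache indexed by
   the virtual PC (env->tb_jmp_cache); on a miss, tb_find_slow() below
   searches the hash table keyed by the *physical* PC, so translations
   remain valid across changes of the virtual-to-physical mapping.  A
   block spanning two pages must also match on the second page's physical
   address. */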
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}
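
/* Invoked when the execution loop exits with EXCP_DEBUG.  If no watchpoint
   actually fired (i.e. the exit came from single-stepping or a
   breakpoint), any BP_WATCHPOINT_HIT flags left over from earlier checks
   are cleared so they are not reported spuriously later. */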
static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_IA64)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;
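
    /* Everything below runs under setjmp(): whenever a helper needs to
       abandon the current translation block (guest exception, halt, exit
       request), it calls cpu_loop_exit(), which longjmp()s back to the
       top of the outer loop, where env->exception_index is inspected. */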
    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_IA64)
                    do_interrupt(env);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif
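
            /* With KVM enabled, guest code runs in the kernel rather than
               through TCG.  kvm_cpu_exec() returns when the vcpu needs
               userspace attention; the longjmp restarts the outer loop so
               that pending exceptions and interrupts are re-examined. */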
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                              (long)tb->tc_ptr, tb->pc,
                              lookup_symbol(tb->pc));
#endif
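                /* tb_add_jump() patches the jump slot (next_tb & 3) of the
                   previously executed TB so that its host code branches
                   directly to this TB, bypassing the lookup on later
                   runs. */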
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;
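
                /* Run the current TB and any blocks chained to it.  With
                   icount, the generated code decrements
                   env->icount_decr.u16.low and exits with status 2 when
                   the budget is exhausted; the handler below refills it
                   from icount_extra, and a remainder too small for a full
                   block is run through cpu_exec_nocache() as an uncached
                   block of exactly the remaining length. */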
                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_IA64)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
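
/* In user-only mode, guest memory accesses run directly on the host, so a
   guest-visible MMU fault first surfaces as a host signal.  Each target's
   handle_cpu_signal() below lets page_unprotect() handle writes to pages
   containing translated code (self-modifying code support), then asks the
   target MMU fault handler whether the access was actually valid, and
   only then raises a guest exception. */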
/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif
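
/* The host-specific cpu_signal_handler() implementations below extract the
   faulting instruction address (and, where the host context provides it,
   whether the access was a write) from the signal's ucontext and forward
   them to handle_cpu_signal().  Hosts without a cheap write flag
   pessimistically report is_write = 0. */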

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)   (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)  ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context) ((context)->uc_mcontext->es.err)
# define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)   ((context)->sc_eip)
# define TRAP_sig(context)  ((context)->sc_trapno)
# define ERROR_sig(context) ((context)->sc_err)
# define MASK_sig(context)  ((context)->sc_mask)
#else
# define EIP_sig(context)   ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)    _UC_MACHINE_PC(context)
#define TRAP_sig(context)  ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)    ((context)->sc_rip)
#define TRAP_sig(context)  ((context)->sc_trapno)
#define ERROR_sig(context) ((context)->sc_err)
#define MASK_sig(context)  ((context)->sc_mask)
#else
#define PC_sig(context)    ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)  ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)  ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(gpr[reg_num], context)
# define IAR_sig(context)                REG_sig(nip, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(msr, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)  /* Count register */
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(link, context) /* Link register */
# define CR_sig(context)                 REG_sig(ccr, context)  /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)              (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)                REG_sig(dar, context)
# define DSISR_sig(context)              REG_sig(dsisr, context)
# define TRAP_sig(context)               REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context) /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context) /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)
# define XER_sig(context)                REG_sig(xer, context)  /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)   /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)   /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context) /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of the PowerPC exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x06: // sth
        case 0x04: // st
        case 0x07: // std
        case 0x24: // stf
        case 0x27: // stdf
        case 0x25: // stfsr
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */